1 /*
   2  * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "logging/log.hpp"
  27 #include "memory/resourceArea.hpp"
  28 #include "memory/virtualspace.hpp"
  29 #include "oops/markOop.hpp"
  30 #include "oops/oop.inline.hpp"
  31 #include "services/memTracker.hpp"
  32 
  33 // ReservedSpace
  34 
  35 // Dummy constructor
  36 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
  37     _alignment(0), _special(false), _executable(false) {
  38 }
  39 
  40 ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) {
  41   bool has_preferred_page_size = preferred_page_size != 0;
  42   // Want to use large pages where possible and pad with small pages.
  43   size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
  44   bool large_pages = page_size != (size_t)os::vm_page_size();
  45   size_t alignment;
  46   if (large_pages && has_preferred_page_size) {
  47     alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
  48     // ReservedSpace initialization requires size to be aligned to the given
  49     // alignment. Align the size up.
  50     size = align_size_up(size, alignment);
  51   } else {
  52     // Don't force the alignment to be large page aligned,
  53     // since that will waste memory.
  54     alignment = os::vm_allocation_granularity();
  55   }
  56   initialize(size, alignment, large_pages, NULL, false);
  57 }
  58 
  59 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  60                              bool large,
  61                              char* requested_address) {
  62   initialize(size, alignment, large, requested_address, false);
  63 }
  64 
  65 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  66                              bool large,
  67                              bool executable) {
  68   initialize(size, alignment, large, NULL, executable);
  69 }
  70 
  71 // Helper method.
  72 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
  73                                            const size_t size, bool special)
  74 {
  75   if (base == requested_address || requested_address == NULL)
  76     return false; // did not fail
  77 
  78   if (base != NULL) {
    // A different reserve address may be acceptable in other cases,
    // but for compressed oops the heap should be at the requested address.
  81     assert(UseCompressedOops, "currently requested address used only for compressed oops");
  82     log_debug(gc, heap, coops)("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address));
  83     // OS ignored requested address. Try different address.
  84     if (special) {
  85       if (!os::release_memory_special(base, size)) {
  86         fatal("os::release_memory_special failed");
  87       }
  88     } else {
  89       if (!os::release_memory(base, size)) {
  90         fatal("os::release_memory failed");
  91       }
  92     }
  93   }
  94   return true;
  95 }
  96 
  97 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
  98                                char* requested_address,
  99                                bool executable) {
 100   const size_t granularity = os::vm_allocation_granularity();
 101   assert((size & (granularity - 1)) == 0,
 102          "size not aligned to os::vm_allocation_granularity()");
 103   assert((alignment & (granularity - 1)) == 0,
 104          "alignment not aligned to os::vm_allocation_granularity()");
 105   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 106          "not a power of 2");
 107 
 108   alignment = MAX2(alignment, (size_t)os::vm_page_size());
 109 
 110   _base = NULL;
 111   _size = 0;
 112   _special = false;
 113   _executable = executable;
 114   _alignment = 0;
 115   _noaccess_prefix = 0;
 116   if (size == 0) {
 117     return;
 118   }
 119 
  // If the OS doesn't support demand paging for large page memory, we need
 121   // to use reserve_memory_special() to reserve and pin the entire region.
 122   bool special = large && !os::can_commit_large_page_memory();
 123   char* base = NULL;
 124 
 125   if (special) {
 126 
 127     base = os::reserve_memory_special(size, alignment, requested_address, executable);
 128 
 129     if (base != NULL) {
 130       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
        // OS ignored the requested address; return so the caller can retry elsewhere.
 132         return;
 133       }
 134       // Check alignment constraints.
 135       assert((uintptr_t) base % alignment == 0,
 136              "Large pages returned a non-aligned address, base: "
 137              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 138              p2i(base), alignment);
 139       _special = true;
 140     } else {
 141       // failed; try to reserve regular memory below
 142       if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 143                             !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 144         log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 145       }
 146     }
 147   }
 148 
 149   if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
 151     // When reserving a large address range, most OSes seem to align to at
 152     // least 64K.
 153 
 154     // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid mapping over something
 156     // important.  If available space is not detected, return NULL.
 157 
 158     if (requested_address != 0) {
 159       base = os::attempt_reserve_memory_at(size, requested_address);
 160       if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
 161         // OS ignored requested address. Try different address.
 162         base = NULL;
 163       }
 164     } else {
 165       base = os::reserve_memory(size, NULL, alignment);
 166     }
 167 
 168     if (base == NULL) return;
 169 
 170     // Check alignment constraints
 171     if ((((size_t)base) & (alignment - 1)) != 0) {
 172       // Base not aligned, retry
 173       if (!os::release_memory(base, size)) fatal("os::release_memory failed");
 174       // Make sure that size is aligned
 175       size = align_size_up(size, alignment);
 176       base = os::reserve_memory_aligned(size, alignment);
 177 
 178       if (requested_address != 0 &&
 179           failed_to_reserve_as_requested(base, requested_address, size, false)) {
 180         // As a result of the alignment constraints, the allocated base differs
 181         // from the requested address. Return back to the caller who can
 182         // take remedial action (like try again without a requested address).
 183         assert(_base == NULL, "should be");
 184         return;
 185       }
 186     }
 187   }
 188   // Done
 189   _base = base;
 190   _size = size;
 191   _alignment = alignment;
 192 }
 193 
 194 
 195 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
 196                              bool special, bool executable) {
 197   assert((size % os::vm_allocation_granularity()) == 0,
 198          "size not allocation aligned");
 199   _base = base;
 200   _size = size;
 201   _alignment = alignment;
 202   _noaccess_prefix = 0;
 203   _special = special;
 204   _executable = executable;
 205 }
 206 
 207 
 208 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
 209                                         bool split, bool realloc) {
 210   assert(partition_size <= size(), "partition failed");
 211   if (split) {
 212     os::split_reserved_memory(base(), size(), partition_size, realloc);
 213   }
 214   ReservedSpace result(base(), partition_size, alignment, special(),
 215                        executable());
 216   return result;
 217 }
 218 
 219 
 220 ReservedSpace
 221 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
 222   assert(partition_size <= size(), "partition failed");
 223   ReservedSpace result(base() + partition_size, size() - partition_size,
 224                        alignment, special(), executable());
 225   return result;
 226 }
 227 
 228 
 229 size_t ReservedSpace::page_align_size_up(size_t size) {
 230   return align_size_up(size, os::vm_page_size());
 231 }
 232 
 233 
 234 size_t ReservedSpace::page_align_size_down(size_t size) {
 235   return align_size_down(size, os::vm_page_size());
 236 }
 237 
 238 
 239 size_t ReservedSpace::allocation_align_size_up(size_t size) {
 240   return align_size_up(size, os::vm_allocation_granularity());
 241 }
 242 
 243 
 244 size_t ReservedSpace::allocation_align_size_down(size_t size) {
 245   return align_size_down(size, os::vm_allocation_granularity());
 246 }
 247 
 248 
 249 void ReservedSpace::release() {
 250   if (is_reserved()) {
 251     char *real_base = _base - _noaccess_prefix;
 252     const size_t real_size = _size + _noaccess_prefix;
 253     if (special()) {
 254       os::release_memory_special(real_base, real_size);
    } else {
 256       os::release_memory(real_base, real_size);
 257     }
 258     _base = NULL;
 259     _size = 0;
 260     _noaccess_prefix = 0;
 261     _alignment = 0;
 262     _special = false;
 263     _executable = false;
 264   }
 265 }
 266 
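// The prefix returned here must be a multiple of both the page size (so the
// prefix can be protected page-wise) and the heap alignment (so that _base
// stays properly aligned after the prefix is skipped), hence the lcm.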
 267 static size_t noaccess_prefix_size(size_t alignment) {
 268   return lcm(os::vm_page_size(), alignment);
 269 }
 270 
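// Illustrative example for establish_noaccess_prefix() below (the page size and
// alignment are assumed values, not platform facts): with a 4K page size and a
// 2M heap alignment, the prefix is lcm(4K, 2M) = 2M.  When the heap would extend
// past OopEncodingHeapMax (and the platform allows it), that first 2M is
// protected with MEM_PROT_NONE; either way _base moves up by 2M and _size
// shrinks by 2M, so the usable heap still starts at a 2M-aligned address.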
 271 void ReservedHeapSpace::establish_noaccess_prefix() {
 272   assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
 273   _noaccess_prefix = noaccess_prefix_size(_alignment);
 274 
 275   if (base() && base() + _size > (char *)OopEncodingHeapMax) {
 276     if (true
 277         WIN64_ONLY(&& !UseLargePages)
 278         AIX_ONLY(&& os::vm_page_size() != 64*K)) {
 279       // Protect memory at the base of the allocated region.
 280       // If special, the page was committed (only matters on windows)
 281       if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
 282         fatal("cannot protect protection page");
 283       }
 284       log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
 285                                  PTR_FORMAT " / " INTX_FORMAT " bytes",
 286                                  p2i(_base),
 287                                  _noaccess_prefix);
 288       assert(Universe::narrow_oop_use_implicit_null_checks() == true, "not initialized?");
 289     } else {
 290       Universe::set_narrow_oop_use_implicit_null_checks(false);
 291     }
 292   }
 293 
 294   _base += _noaccess_prefix;
 295   _size -= _noaccess_prefix;
 296   assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
 297 }
 298 
 299 // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
 300 // Does not check whether the reserved memory actually is at requested_address, as the memory returned
 301 // might still fulfill the wishes of the caller.
// Ensures the memory is aligned to 'alignment'.
// NOTE: If this ReservedHeapSpace already points to some reserved memory, that memory is freed first.
 304 void ReservedHeapSpace::try_reserve_heap(size_t size,
 305                                          size_t alignment,
 306                                          bool large,
 307                                          char* requested_address) {
 308   if (_base != NULL) {
 309     // We tried before, but we didn't like the address delivered.
 310     release();
 311   }
 312 
  // If the OS doesn't support demand paging for large page memory, we need
 314   // to use reserve_memory_special() to reserve and pin the entire region.
 315   bool special = large && !os::can_commit_large_page_memory();
 316   char* base = NULL;
 317 
 318   log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
 319                              " heap of size " SIZE_FORMAT_HEX,
 320                              p2i(requested_address),
 321                              size);
 322 
 323   if (special) {
 324     base = os::reserve_memory_special(size, alignment, requested_address, false);
 325 
 326     if (base != NULL) {
 327       // Check alignment constraints.
 328       assert((uintptr_t) base % alignment == 0,
 329              "Large pages returned a non-aligned address, base: "
 330              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 331              p2i(base), alignment);
 332       _special = true;
 333     }
 334   }
 335 
 336   if (base == NULL) {
 337     // Failed; try to reserve regular memory below
 338     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 339                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 340       log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 341     }
 342 
    // Optimistically assume that the OS returns an aligned base pointer.
 344     // When reserving a large address range, most OSes seem to align to at
 345     // least 64K.
 346 
 347     // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid mapping over something
 349     // important.  If available space is not detected, return NULL.
 350 
 351     if (requested_address != 0) {
 352       base = os::attempt_reserve_memory_at(size, requested_address);
 353     } else {
 354       base = os::reserve_memory(size, NULL, alignment);
 355     }
 356   }
 357   if (base == NULL) { return; }
 358 
 359   // Done
 360   _base = base;
 361   _size = size;
 362   _alignment = alignment;
 363 
 364   // Check alignment constraints
 365   if ((((size_t)base) & (alignment - 1)) != 0) {
 366     // Base not aligned, retry.
 367     release();
 368   }
 369 }
 370 
 371 void ReservedHeapSpace::try_reserve_range(char *highest_start,
 372                                           char *lowest_start,
 373                                           size_t attach_point_alignment,
 374                                           char *aligned_heap_base_min_address,
 375                                           char *upper_bound,
 376                                           size_t size,
 377                                           size_t alignment,
 378                                           bool large) {
 379   const size_t attach_range = highest_start - lowest_start;
  // Cap num_attempts at the number of attempts that are actually possible.
  // At least one attempt is possible even for a zero-sized attach range.
 382   const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
 383   const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);
 384 
 385   const size_t stepsize = (attach_range == 0) ? // Only one try.
 386     (size_t) highest_start : align_size_up(attach_range / num_attempts_to_try, attach_point_alignment);
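  // Illustrative example (HeapSearchSteps and the range are assumed values):
  // with attach_range = 4G, attach_point_alignment = 64K and HeapSearchSteps = 3,
  // stepsize is roughly 4G/3 rounded up to a 64K multiple, so about three
  // attach points are probed between highest_start and lowest_start.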
 387 
 388   // Try attach points from top to bottom.
 389   char* attach_point = highest_start;
 390   while (attach_point >= lowest_start  &&
 391          attach_point <= highest_start &&  // Avoid wrap around.
 392          ((_base == NULL) ||
 393           (_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
 394     try_reserve_heap(size, alignment, large, attach_point);
 395     attach_point -= stepsize;
 396   }
 397 }
 398 
 399 #define SIZE_64K  ((uint64_t) UCONST64(      0x10000))
 400 #define SIZE_256M ((uint64_t) UCONST64(   0x10000000))
 401 #define SIZE_32G  ((uint64_t) UCONST64(  0x800000000))
 402 
 403 // Helper for heap allocation. Returns an array with addresses
 404 // (OS-specific) which are suited for disjoint base mode. Array is
 405 // NULL terminated.
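// Explanatory note: the wish addresses below are all multiples of 32G.  With the
// default 8-byte object alignment OopEncodingHeapMax is 32G, so a heap based at
// such an address shares no bits with a shifted narrow oop, which is what
// disjoint base mode relies on (see Universe::is_disjoint_heap_base_address()).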
 406 static char** get_attach_addresses_for_disjoint_mode() {
 407   static uint64_t addresses[] = {
 408      2 * SIZE_32G,
 409      3 * SIZE_32G,
 410      4 * SIZE_32G,
 411      8 * SIZE_32G,
 412     10 * SIZE_32G,
 413      1 * SIZE_64K * SIZE_32G,
 414      2 * SIZE_64K * SIZE_32G,
 415      3 * SIZE_64K * SIZE_32G,
 416      4 * SIZE_64K * SIZE_32G,
 417     16 * SIZE_64K * SIZE_32G,
 418     32 * SIZE_64K * SIZE_32G,
 419     34 * SIZE_64K * SIZE_32G,
 420     0
 421   };
 422 
  // Skip addresses below OopEncodingHeapMax or HeapBaseMinAddress. This assumes
  // the array is sorted in ascending order.
 425   uint i = 0;
 426   while (addresses[i] != 0 &&
 427          (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
 428     i++;
 429   }
 430   uint start = i;
 431 
 432   // Avoid more steps than requested.
 433   i = 0;
 434   while (addresses[start+i] != 0) {
 435     if (i == HeapSearchSteps) {
 436       addresses[start+i] = 0;
 437       break;
 438     }
 439     i++;
 440   }
 441 
 442   return (char**) &addresses[start];
 443 }
 444 
 445 void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, bool large) {
 446   guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
            "cannot allocate compressed oop heap for this size");
 448   guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
 449   assert(HeapBaseMinAddress > 0, "sanity");
 450 
 451   const size_t granularity = os::vm_allocation_granularity();
 452   assert((size & (granularity - 1)) == 0,
 453          "size not aligned to os::vm_allocation_granularity()");
 454   assert((alignment & (granularity - 1)) == 0,
 455          "alignment not aligned to os::vm_allocation_granularity()");
 456   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 457          "not a power of 2");
 458 
 459   // The necessary attach point alignment for generated wish addresses.
 460   // This is needed to increase the chance of attaching for mmap and shmat.
 461   const size_t os_attach_point_alignment =
 462     AIX_ONLY(SIZE_256M)  // Known shm boundary alignment.
 463     NOT_AIX(os::vm_allocation_granularity());
 464   const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
 465 
 466   char *aligned_heap_base_min_address = (char *)align_ptr_up((void *)HeapBaseMinAddress, alignment);
 467   size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
 468     noaccess_prefix_size(alignment) : 0;
 469 
 470   // Attempt to alloc at user-given address.
 471   if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
 472     try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_heap_base_min_address);
 473     if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
 474       release();
 475     }
 476   }
 477 
  // If that succeeded, keep the heap at HeapBaseMinAddress; otherwise try other placements.
 479   if (_base == NULL) {
 480 
 481     // Try to allocate the heap at addresses that allow efficient oop compression.
 482     // Different schemes are tried, in order of decreasing optimization potential.
 483     //
 484     // For this, try_reserve_heap() is called with the desired heap base addresses.
 485     // A call into the os layer to allocate at a given address can return memory
    // at a different address than requested.  Still, this might be memory at a useful
    // address. try_reserve_heap() always keeps this allocated memory, as the
    // criteria for a good heap are only checked here.
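    //
    // Roughly, the placements tried below are (ranges assume the default 8-byte
    // object alignment, i.e. a 3-bit shift):
    //   1. Unscaled:       heap end <= 4G,  oop == narrowOop (no base, no shift).
    //   2. Zerobased:      heap end <= 32G, oop == narrowOop << shift (no base).
    //   3. Disjoint base:  heap base aligned to OopEncodingHeapMax.
    //   4. Any placement:  oop == base + (narrowOop << shift), with noaccess prefix.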
 489 
    // Attempt to allocate so that we can run without base and scale (32-bit unscaled compressed oops).
 491     // Give it several tries from top of range to bottom.
 492     if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {
 493 
      // Calculate the address range within which we try to attach (range of possible start addresses).
 495       char* const highest_start = (char *)align_ptr_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
 496       char* const lowest_start  = (char *)align_ptr_up(aligned_heap_base_min_address, attach_point_alignment);
 497       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 498                         aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
 499     }
 500 
    // Zerobased: attempt to allocate in the lower 32G.
    // But leave room for the compressed class pointer space, which is allocated
    // above the heap.
 504     char *zerobased_max = (char *)OopEncodingHeapMax;
 505     const size_t class_space = align_size_up(CompressedClassSpaceSize, alignment);
 506     // For small heaps, save some space for compressed class pointer
 507     // space so it can be decoded with no base.
 508     if (UseCompressedClassPointers && !UseSharedSpaces &&
 509         OopEncodingHeapMax <= KlassEncodingMetaspaceMax &&
 510         (uint64_t)(aligned_heap_base_min_address + size + class_space) <= KlassEncodingMetaspaceMax) {
 511       zerobased_max = (char *)OopEncodingHeapMax - class_space;
 512     }
 513 
 514     // Give it several tries from top of range to bottom.
    if (aligned_heap_base_min_address + size <= zerobased_max &&    // Zerobased is theoretically possible.
 516         ((_base == NULL) ||                        // No previous try succeeded.
 517          (_base + size > zerobased_max))) {        // Unscaled delivered an arbitrary address.
 518 
      // Calculate the address range within which we try to attach (range of possible start addresses).
 520       char *const highest_start = (char *)align_ptr_down(zerobased_max - size, attach_point_alignment);
      // Note that size is not guaranteed to be less than UnscaledOopHeapMax,
      // so the unsigned subtraction below may wrap around.
 523       char *lowest_start = aligned_heap_base_min_address;
 524       uint64_t unscaled_end = UnscaledOopHeapMax - size;
 525       if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
 526         lowest_start = MAX2(lowest_start, (char*)unscaled_end);
 527       }
 528       lowest_start  = (char *)align_ptr_up(lowest_start, attach_point_alignment);
 529       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 530                         aligned_heap_base_min_address, zerobased_max, size, alignment, large);
 531     }
 532 
 533     // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
 534     // implement null checks.
 535     noaccess_prefix = noaccess_prefix_size(alignment);
 536 
 537     // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjointbase mode.
 538     char** addresses = get_attach_addresses_for_disjoint_mode();
 539     int i = 0;
 540     while (addresses[i] &&                                 // End of array not yet reached.
 541            ((_base == NULL) ||                             // No previous try succeeded.
 542             (_base + size >  (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
 543              !Universe::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
 544       char* const attach_point = addresses[i];
 545       assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
 546       try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
 547       i++;
 548     }
 549 
 550     // Last, desperate try without any placement.
 551     if (_base == NULL) {
 552       log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix);
 553       initialize(size + noaccess_prefix, alignment, large, NULL, false);
 554     }
 555   }
 556 }
 557 
 558 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large) : ReservedSpace() {
 559 
 560   if (size == 0) {
 561     return;
 562   }
 563 
 564   // Heap size should be aligned to alignment, too.
 565   guarantee(is_size_aligned(size, alignment), "set by caller");
 566 
 567   if (UseCompressedOops) {
 568     initialize_compressed_heap(size, alignment, large);
 569     if (_size > size) {
      // We allocated the heap with a noaccess prefix.
      // It can happen that we get a zerobased/unscaled heap with a noaccess prefix,
      // if we had to try at an arbitrary address.
 573       establish_noaccess_prefix();
 574     }
 575   } else {
 576     initialize(size, alignment, large, NULL, false);
 577   }
 578 
 579   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
 580          "area must be distinguishable from marks for mark-sweep");
 581   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
 582          "area must be distinguishable from marks for mark-sweep");
 583 
 584   if (base() > 0) {
 585     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
 586   }
 587 }
 588 
// Reserve space for the code segment.  Same as the Java heap, except we mark
// this as executable.
 591 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
 592                                      size_t rs_align,
 593                                      bool large) :
 594   ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
 595   MemTracker::record_virtual_memory_type((address)base(), mtCode);
 596 }
 597 
 598 // VirtualSpace
 599 
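// A typical (illustrative) usage pattern for the two classes in this file;
// 'reserved_size' etc. are placeholder names, real callers wrap this in
// higher-level spaces such as the code cache and the Java heap:
//
//   ReservedSpace rs(reserved_size);        // reserve address space
//   VirtualSpace vs;
//   vs.initialize(rs, committed_size);      // commit the initial portion
//   vs.expand_by(more_bytes, false);        // grow the committed region
//   vs.shrink_by(fewer_bytes);              // shrink it again
//   rs.release();                           // finally return the reservation
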
 600 VirtualSpace::VirtualSpace() {
 601   _low_boundary           = NULL;
 602   _high_boundary          = NULL;
 603   _low                    = NULL;
 604   _high                   = NULL;
 605   _lower_high             = NULL;
 606   _middle_high            = NULL;
 607   _upper_high             = NULL;
 608   _lower_high_boundary    = NULL;
 609   _middle_high_boundary   = NULL;
 610   _upper_high_boundary    = NULL;
 611   _lower_alignment        = 0;
 612   _middle_alignment       = 0;
 613   _upper_alignment        = 0;
 614   _special                = false;
 615   _executable             = false;
 616 }
 617 
 618 
 619 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
 620   const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
 621   return initialize_with_granularity(rs, committed_size, max_commit_granularity);
 622 }
 623 
 624 bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
  if (!rs.is_reserved()) return false;  // Allocation failed.
 626   assert(_low_boundary == NULL, "VirtualSpace already initialized");
 627   assert(max_commit_granularity > 0, "Granularity must be non-zero.");
 628 
 629   _low_boundary  = rs.base();
 630   _high_boundary = low_boundary() + rs.size();
 631 
 632   _low = low_boundary();
 633   _high = low();
 634 
 635   _special = rs.special();
 636   _executable = rs.executable();
 637 
 638   // When a VirtualSpace begins life at a large size, make all future expansion
 639   // and shrinking occur aligned to a granularity of large pages.  This avoids
 640   // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
 642   // page size, the only spaces that get handled this way are codecache and
 643   // the heap itself, both of which provide a substantial performance
 644   // boost in many benchmarks when covered by large pages.
 645   //
 646   // No attempt is made to force large page alignment at the very top and
 647   // bottom of the space if they are not aligned so already.
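  //
  // Sketch of the resulting layout (the middle region uses the large commit
  // granularity, the two end regions use small pages):
  //
  //   low_boundary()                                            high_boundary()
  //   |-- lower region --|-------- middle region --------|-- upper region --|
  //                      ^                               ^
  //             lower_high_boundary()            middle_high_boundary()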
 648   _lower_alignment  = os::vm_page_size();
 649   _middle_alignment = max_commit_granularity;
 650   _upper_alignment  = os::vm_page_size();
 651 
 652   // End of each region
 653   _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
 654   _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
 655   _upper_high_boundary = high_boundary();
 656 
 657   // High address of each region
 658   _lower_high = low_boundary();
 659   _middle_high = lower_high_boundary();
 660   _upper_high = middle_high_boundary();
 661 
 662   // commit to initial size
 663   if (committed_size > 0) {
 664     if (!expand_by(committed_size)) {
 665       return false;
 666     }
 667   }
 668   return true;
 669 }
 670 
 671 
 672 VirtualSpace::~VirtualSpace() {
 673   release();
 674 }
 675 
 676 
 677 void VirtualSpace::release() {
  // This does not release the memory that was reserved.
  // The caller must release it via rs.release().
 680   _low_boundary           = NULL;
 681   _high_boundary          = NULL;
 682   _low                    = NULL;
 683   _high                   = NULL;
 684   _lower_high             = NULL;
 685   _middle_high            = NULL;
 686   _upper_high             = NULL;
 687   _lower_high_boundary    = NULL;
 688   _middle_high_boundary   = NULL;
 689   _upper_high_boundary    = NULL;
 690   _lower_alignment        = 0;
 691   _middle_alignment       = 0;
 692   _upper_alignment        = 0;
 693   _special                = false;
 694   _executable             = false;
 695 }
 696 
 697 
 698 size_t VirtualSpace::committed_size() const {
 699   return pointer_delta(high(), low(), sizeof(char));
 700 }
 701 
 702 
 703 size_t VirtualSpace::reserved_size() const {
 704   return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
 705 }
 706 
 707 
 708 size_t VirtualSpace::uncommitted_size()  const {
 709   return reserved_size() - committed_size();
 710 }
 711 
 712 size_t VirtualSpace::actual_committed_size() const {
 713   // Special VirtualSpaces commit all reserved space up front.
 714   if (special()) {
 715     return reserved_size();
 716   }
 717 
 718   size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
 719   size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
 720   size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));
 721 
 722 #ifdef ASSERT
 723   size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
 724   size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
 725   size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));
 726 
 727   if (committed_high > 0) {
 728     assert(committed_low == lower, "Must be");
 729     assert(committed_middle == middle, "Must be");
 730   }
 731 
 732   if (committed_middle > 0) {
 733     assert(committed_low == lower, "Must be");
 734   }
 735   if (committed_middle < middle) {
 736     assert(committed_high == 0, "Must be");
 737   }
 738 
 739   if (committed_low < lower) {
 740     assert(committed_high == 0, "Must be");
 741     assert(committed_middle == 0, "Must be");
 742   }
 743 #endif
 744 
 745   return committed_low + committed_middle + committed_high;
 746 }
 747 
 748 
 749 bool VirtualSpace::contains(const void* p) const {
 750   return low() <= (const char*) p && (const char*) p < high();
 751 }
 752 
 753 static void pretouch_expanded_memory(void* start, void* end) {
 754   assert(is_ptr_aligned(start, os::vm_page_size()), "Unexpected alignment");
 755   assert(is_ptr_aligned(end,   os::vm_page_size()), "Unexpected alignment");
 756 
 757   os::pretouch_memory(start, end);
 758 }
 759 
 760 static bool commit_expanded(char* start, size_t size, size_t alignment, bool pre_touch, bool executable) {
 761   if (os::commit_memory(start, size, alignment, executable)) {
 762     if (pre_touch || AlwaysPreTouch) {
 763       pretouch_expanded_memory(start, start + size);
 764     }
 765     return true;
 766   }
 767 
 768   debug_only(warning(
 769       "INFO: os::commit_memory(" PTR_FORMAT ", " PTR_FORMAT
 770       " size=" SIZE_FORMAT ", executable=%d) failed",
 771       p2i(start), p2i(start + size), size, executable);)
 772 
 773   return false;
 774 }
 775 
 776 /*
   First we need to determine if a particular virtual space is using large
   pages.  This is determined in the initialize function and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages.  Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks.  If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading up to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
 787 */
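// Illustrative example (page sizes assumed): with 4K small pages and a 2M commit
// granularity, the lower region covers the small pages from low_boundary() up to
// the first 2M boundary, the middle region covers whole 2M pages, and the upper
// region covers the small-page tail after the last 2M boundary.  A request that
// stays within an already committed large page needs no new commit for the
// middle region.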
 788 bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
 789   if (uncommitted_size() < bytes) {
 790     return false;
 791   }
 792 
 793   if (special()) {
 794     // don't commit memory if the entire space is pinned in memory
 795     _high += bytes;
 796     return true;
 797   }
 798 
 799   char* previous_high = high();
 800   char* unaligned_new_high = high() + bytes;
 801   assert(unaligned_new_high <= high_boundary(), "cannot expand by more than upper boundary");
 802 
 803   // Calculate where the new high for each of the regions should be.  If
 804   // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
 805   // then the unaligned lower and upper new highs would be the
 806   // lower_high() and upper_high() respectively.
 807   char* unaligned_lower_new_high =  MIN2(unaligned_new_high, lower_high_boundary());
 808   char* unaligned_middle_new_high = MIN2(unaligned_new_high, middle_high_boundary());
 809   char* unaligned_upper_new_high =  MIN2(unaligned_new_high, upper_high_boundary());
 810 
  // Align the new highs based on each region's alignment.  lower and upper
 812   // alignment will always be default page size.  middle alignment will be
 813   // LargePageSizeInBytes if the actual size of the virtual space is in
 814   // fact larger than LargePageSizeInBytes.
 815   char* aligned_lower_new_high =  (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
 816   char* aligned_middle_new_high = (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
 817   char* aligned_upper_new_high =  (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
 818 
 819   // Determine which regions need to grow in this expand_by call.
 820   // If you are growing in the lower region, high() must be in that
 821   // region so calculate the size based on high().  For the middle and
 822   // upper regions, determine the starting point of growth based on the
 823   // location of high().  By getting the MAX of the region's low address
 824   // (or the previous region's high address) and high(), we can tell if it
 825   // is an intra or inter region growth.
 826   size_t lower_needs = 0;
 827   if (aligned_lower_new_high > lower_high()) {
 828     lower_needs = pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
 829   }
 830   size_t middle_needs = 0;
 831   if (aligned_middle_new_high > middle_high()) {
 832     middle_needs = pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
 833   }
 834   size_t upper_needs = 0;
 835   if (aligned_upper_new_high > upper_high()) {
 836     upper_needs = pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
 837   }
 838 
 839   // Check contiguity.
 840   assert(low_boundary() <= lower_high() && lower_high() <= lower_high_boundary(),
 841          "high address must be contained within the region");
 842   assert(lower_high_boundary() <= middle_high() && middle_high() <= middle_high_boundary(),
 843          "high address must be contained within the region");
 844   assert(middle_high_boundary() <= upper_high() && upper_high() <= upper_high_boundary(),
 845          "high address must be contained within the region");
 846 
 847   // Commit regions
 848   if (lower_needs > 0) {
 849     assert(lower_high() + lower_needs <= lower_high_boundary(), "must not expand beyond region");
 850     if (!commit_expanded(lower_high(), lower_needs, _lower_alignment, pre_touch, _executable)) {
 851       return false;
 852     }
 853     _lower_high += lower_needs;
 854   }
 855 
 856   if (middle_needs > 0) {
 857     assert(middle_high() + middle_needs <= middle_high_boundary(), "must not expand beyond region");
 858     if (!commit_expanded(middle_high(), middle_needs, _middle_alignment, pre_touch, _executable)) {
 859       return false;
 860     }
 861     _middle_high += middle_needs;
 862   }
 863 
 864   if (upper_needs > 0) {
 865     assert(upper_high() + upper_needs <= upper_high_boundary(), "must not expand beyond region");
 866     if (!commit_expanded(upper_high(), upper_needs, _upper_alignment, pre_touch, _executable)) {
 867       return false;
 868     }
 869     _upper_high += upper_needs;
 870   }
 871 
 872   _high += bytes;
 873   return true;
 874 }
 875 
// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary,
// at which point that particular page can be uncommitted.
 879 void VirtualSpace::shrink_by(size_t size) {
 880   if (committed_size() < size)
 881     fatal("Cannot shrink virtual space to negative size");
 882 
 883   if (special()) {
 884     // don't uncommit if the entire space is pinned in memory
 885     _high -= size;
 886     return;
 887   }
 888 
 889   char* unaligned_new_high = high() - size;
 890   assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");
 891 
 892   // Calculate new unaligned address
 893   char* unaligned_upper_new_high =
 894     MAX2(unaligned_new_high, middle_high_boundary());
 895   char* unaligned_middle_new_high =
 896     MAX2(unaligned_new_high, lower_high_boundary());
 897   char* unaligned_lower_new_high =
 898     MAX2(unaligned_new_high, low_boundary());
 899 
 900   // Align address to region's alignment
 901   char* aligned_upper_new_high =
 902     (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
 903   char* aligned_middle_new_high =
 904     (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
 905   char* aligned_lower_new_high =
 906     (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
 907 
 908   // Determine which regions need to shrink
 909   size_t upper_needs = 0;
 910   if (aligned_upper_new_high < upper_high()) {
 911     upper_needs =
 912       pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
 913   }
 914   size_t middle_needs = 0;
 915   if (aligned_middle_new_high < middle_high()) {
 916     middle_needs =
 917       pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
 918   }
 919   size_t lower_needs = 0;
 920   if (aligned_lower_new_high < lower_high()) {
 921     lower_needs =
 922       pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
 923   }
 924 
 925   // Check contiguity.
 926   assert(middle_high_boundary() <= upper_high() &&
 927          upper_high() <= upper_high_boundary(),
 928          "high address must be contained within the region");
 929   assert(lower_high_boundary() <= middle_high() &&
 930          middle_high() <= middle_high_boundary(),
 931          "high address must be contained within the region");
 932   assert(low_boundary() <= lower_high() &&
 933          lower_high() <= lower_high_boundary(),
 934          "high address must be contained within the region");
 935 
 936   // Uncommit
 937   if (upper_needs > 0) {
 938     assert(middle_high_boundary() <= aligned_upper_new_high &&
 939            aligned_upper_new_high + upper_needs <= upper_high_boundary(),
 940            "must not shrink beyond region");
 941     if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
 942       debug_only(warning("os::uncommit_memory failed"));
 943       return;
 944     } else {
 945       _upper_high -= upper_needs;
 946     }
 947   }
 948   if (middle_needs > 0) {
 949     assert(lower_high_boundary() <= aligned_middle_new_high &&
 950            aligned_middle_new_high + middle_needs <= middle_high_boundary(),
 951            "must not shrink beyond region");
 952     if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
 953       debug_only(warning("os::uncommit_memory failed"));
 954       return;
 955     } else {
 956       _middle_high -= middle_needs;
 957     }
 958   }
 959   if (lower_needs > 0) {
 960     assert(low_boundary() <= aligned_lower_new_high &&
 961            aligned_lower_new_high + lower_needs <= lower_high_boundary(),
 962            "must not shrink beyond region");
 963     if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
 964       debug_only(warning("os::uncommit_memory failed"));
 965       return;
 966     } else {
 967       _lower_high -= lower_needs;
 968     }
 969   }
 970 
 971   _high -= size;
 972 }
 973 
 974 #ifndef PRODUCT
 975 void VirtualSpace::check_for_contiguity() {
 976   // Check contiguity.
 977   assert(low_boundary() <= lower_high() &&
 978          lower_high() <= lower_high_boundary(),
 979          "high address must be contained within the region");
 980   assert(lower_high_boundary() <= middle_high() &&
 981          middle_high() <= middle_high_boundary(),
 982          "high address must be contained within the region");
 983   assert(middle_high_boundary() <= upper_high() &&
 984          upper_high() <= upper_high_boundary(),
 985          "high address must be contained within the region");
 986   assert(low() >= low_boundary(), "low");
 987   assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
 988   assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
 989   assert(high() <= upper_high(), "upper high");
 990 }
 991 
 992 void VirtualSpace::print_on(outputStream* out) {
 993   out->print   ("Virtual space:");
 994   if (special()) out->print(" (pinned in memory)");
 995   out->cr();
 996   out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
 997   out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
 998   out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low()), p2i(high()));
 999   out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low_boundary()), p2i(high_boundary()));
1000 }
1001 
1002 void VirtualSpace::print() {
1003   print_on(tty);
1004 }
1005 
1006 /////////////// Unit tests ///////////////
1007 
1008 #ifndef PRODUCT
1009 
1010 #define test_log(...) \
1011   do {\
1012     if (VerboseInternalVMTests) { \
1013       tty->print_cr(__VA_ARGS__); \
1014       tty->flush(); \
1015     }\
1016   } while (false)
1017 
1018 class TestReservedSpace : AllStatic {
1019  public:
1020   static void small_page_write(void* addr, size_t size) {
1021     size_t page_size = os::vm_page_size();
1022 
1023     char* end = (char*)addr + size;
1024     for (char* p = (char*)addr; p < end; p += page_size) {
1025       *p = 1;
1026     }
1027   }
1028 
1029   static void release_memory_for_test(ReservedSpace rs) {
1030     if (rs.special()) {
1031       guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
1032     } else {
1033       guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
1034     }
1035   }
1036 
1037   static void test_reserved_space1(size_t size, size_t alignment) {
1038     test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
1039 
1040     assert(is_size_aligned(size, alignment), "Incorrect input parameters");
1041 
1042     ReservedSpace rs(size,          // size
1043                      alignment,     // alignment
1044                      UseLargePages, // large
1045                      (char *)NULL); // requested_address
1046 
1047     test_log(" rs.special() == %d", rs.special());
1048 
1049     assert(rs.base() != NULL, "Must be");
1050     assert(rs.size() == size, "Must be");
1051 
1052     assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
1053     assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
1054 
1055     if (rs.special()) {
1056       small_page_write(rs.base(), size);
1057     }
1058 
1059     release_memory_for_test(rs);
1060   }
1061 
1062   static void test_reserved_space2(size_t size) {
1063     test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
1064 
1065     assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1066 
1067     ReservedSpace rs(size);
1068 
1069     test_log(" rs.special() == %d", rs.special());
1070 
1071     assert(rs.base() != NULL, "Must be");
1072     assert(rs.size() == size, "Must be");
1073 
1074     if (rs.special()) {
1075       small_page_write(rs.base(), size);
1076     }
1077 
1078     release_memory_for_test(rs);
1079   }
1080 
1081   static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
1082     test_log("test_reserved_space3(%p, %p, %d)",
1083         (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);
1084 
1085     if (size < alignment) {
1086       // Tests might set -XX:LargePageSizeInBytes=<small pages> and cause unexpected input arguments for this test.
1087       assert((size_t)os::vm_page_size() == os::large_page_size(), "Test needs further refinement");
1088       return;
1089     }
1090 
1091     assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1092     assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");
1093 
1094     bool large = maybe_large && UseLargePages && size >= os::large_page_size();
1095 
1096     ReservedSpace rs(size, alignment, large, false);
1097 
1098     test_log(" rs.special() == %d", rs.special());
1099 
1100     assert(rs.base() != NULL, "Must be");
1101     assert(rs.size() == size, "Must be");
1102 
1103     if (rs.special()) {
1104       small_page_write(rs.base(), size);
1105     }
1106 
1107     release_memory_for_test(rs);
1108   }
1109 
1110 
1111   static void test_reserved_space1() {
1112     size_t size = 2 * 1024 * 1024;
1113     size_t ag   = os::vm_allocation_granularity();
1114 
1115     test_reserved_space1(size,      ag);
1116     test_reserved_space1(size * 2,  ag);
1117     test_reserved_space1(size * 10, ag);
1118   }
1119 
1120   static void test_reserved_space2() {
1121     size_t size = 2 * 1024 * 1024;
1122     size_t ag = os::vm_allocation_granularity();
1123 
1124     test_reserved_space2(size * 1);
1125     test_reserved_space2(size * 2);
1126     test_reserved_space2(size * 10);
1127     test_reserved_space2(ag);
1128     test_reserved_space2(size - ag);
1129     test_reserved_space2(size);
1130     test_reserved_space2(size + ag);
1131     test_reserved_space2(size * 2);
1132     test_reserved_space2(size * 2 - ag);
1133     test_reserved_space2(size * 2 + ag);
1134     test_reserved_space2(size * 3);
1135     test_reserved_space2(size * 3 - ag);
1136     test_reserved_space2(size * 3 + ag);
1137     test_reserved_space2(size * 10);
1138     test_reserved_space2(size * 10 + size / 2);
1139   }
1140 
1141   static void test_reserved_space3() {
1142     size_t ag = os::vm_allocation_granularity();
1143 
1144     test_reserved_space3(ag,      ag    , false);
1145     test_reserved_space3(ag * 2,  ag    , false);
1146     test_reserved_space3(ag * 3,  ag    , false);
1147     test_reserved_space3(ag * 2,  ag * 2, false);
1148     test_reserved_space3(ag * 4,  ag * 2, false);
1149     test_reserved_space3(ag * 8,  ag * 2, false);
1150     test_reserved_space3(ag * 4,  ag * 4, false);
1151     test_reserved_space3(ag * 8,  ag * 4, false);
1152     test_reserved_space3(ag * 16, ag * 4, false);
1153 
1154     if (UseLargePages) {
1155       size_t lp = os::large_page_size();
1156 
1157       // Without large pages
1158       test_reserved_space3(lp,     ag * 4, false);
1159       test_reserved_space3(lp * 2, ag * 4, false);
1160       test_reserved_space3(lp * 4, ag * 4, false);
1161       test_reserved_space3(lp,     lp    , false);
1162       test_reserved_space3(lp * 2, lp    , false);
1163       test_reserved_space3(lp * 3, lp    , false);
1164       test_reserved_space3(lp * 2, lp * 2, false);
1165       test_reserved_space3(lp * 4, lp * 2, false);
1166       test_reserved_space3(lp * 8, lp * 2, false);
1167 
1168       // With large pages
1169       test_reserved_space3(lp, ag * 4    , true);
1170       test_reserved_space3(lp * 2, ag * 4, true);
1171       test_reserved_space3(lp * 4, ag * 4, true);
1172       test_reserved_space3(lp, lp        , true);
1173       test_reserved_space3(lp * 2, lp    , true);
1174       test_reserved_space3(lp * 3, lp    , true);
1175       test_reserved_space3(lp * 2, lp * 2, true);
1176       test_reserved_space3(lp * 4, lp * 2, true);
1177       test_reserved_space3(lp * 8, lp * 2, true);
1178     }
1179   }
1180 
1181   static void test_reserved_space() {
1182     test_reserved_space1();
1183     test_reserved_space2();
1184     test_reserved_space3();
1185   }
1186 };
1187 
1188 void TestReservedSpace_test() {
1189   TestReservedSpace::test_reserved_space();
1190 }
1191 
1192 #define assert_equals(actual, expected)  \
1193   assert(actual == expected,             \
1194          "Got " SIZE_FORMAT " expected " \
1195          SIZE_FORMAT, actual, expected);
1196 
1197 #define assert_ge(value1, value2)                  \
1198   assert(value1 >= value2,                         \
1199          "'" #value1 "': " SIZE_FORMAT " '"        \
1200          #value2 "': " SIZE_FORMAT, value1, value2);
1201 
1202 #define assert_lt(value1, value2)                  \
1203   assert(value1 < value2,                          \
1204          "'" #value1 "': " SIZE_FORMAT " '"        \
1205          #value2 "': " SIZE_FORMAT, value1, value2);
1206 
1207 
1208 class TestVirtualSpace : AllStatic {
1209   enum TestLargePages {
1210     Default,
1211     Disable,
1212     Reserve,
1213     Commit
1214   };
1215 
1216   static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
    switch (mode) {
1218     default:
1219     case Default:
1220     case Reserve:
1221       return ReservedSpace(reserve_size_aligned);
1222     case Disable:
1223     case Commit:
1224       return ReservedSpace(reserve_size_aligned,
1225                            os::vm_allocation_granularity(),
1226                            /* large */ false, /* exec */ false);
1227     }
1228   }
1229 
1230   static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
    switch (mode) {
1232     default:
1233     case Default:
1234     case Reserve:
1235       return vs.initialize(rs, 0);
1236     case Disable:
1237       return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
1238     case Commit:
1239       return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
1240     }
1241   }
1242 
1243  public:
1244   static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
1245                                                         TestLargePages mode = Default) {
1246     size_t granularity = os::vm_allocation_granularity();
1247     size_t reserve_size_aligned = align_size_up(reserve_size, granularity);
1248 
1249     ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
1250 
1251     assert(reserved.is_reserved(), "Must be");
1252 
1253     VirtualSpace vs;
1254     bool initialized = initialize_virtual_space(vs, reserved, mode);
1255     assert(initialized, "Failed to initialize VirtualSpace");
1256 
1257     vs.expand_by(commit_size, false);
1258 
1259     if (vs.special()) {
1260       assert_equals(vs.actual_committed_size(), reserve_size_aligned);
1261     } else {
1262       assert_ge(vs.actual_committed_size(), commit_size);
1263       // Approximate the commit granularity.
      // Make sure that we don't commit using large pages
      // if large pages have been disabled for this VirtualSpace.
1266       size_t commit_granularity = (mode == Disable || !UseLargePages) ?
1267                                    os::vm_page_size() : os::large_page_size();
1268       assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
1269     }
1270 
1271     reserved.release();
1272   }
1273 
1274   static void test_virtual_space_actual_committed_space_one_large_page() {
1275     if (!UseLargePages) {
1276       return;
1277     }
1278 
1279     size_t large_page_size = os::large_page_size();
1280 
1281     ReservedSpace reserved(large_page_size, large_page_size, true, false);
1282 
1283     assert(reserved.is_reserved(), "Must be");
1284 
1285     VirtualSpace vs;
1286     bool initialized = vs.initialize(reserved, 0);
1287     assert(initialized, "Failed to initialize VirtualSpace");
1288 
1289     vs.expand_by(large_page_size, false);
1290 
1291     assert_equals(vs.actual_committed_size(), large_page_size);
1292 
1293     reserved.release();
1294   }
1295 
1296   static void test_virtual_space_actual_committed_space() {
1297     test_virtual_space_actual_committed_space(4 * K, 0);
1298     test_virtual_space_actual_committed_space(4 * K, 4 * K);
1299     test_virtual_space_actual_committed_space(8 * K, 0);
1300     test_virtual_space_actual_committed_space(8 * K, 4 * K);
1301     test_virtual_space_actual_committed_space(8 * K, 8 * K);
1302     test_virtual_space_actual_committed_space(12 * K, 0);
1303     test_virtual_space_actual_committed_space(12 * K, 4 * K);
1304     test_virtual_space_actual_committed_space(12 * K, 8 * K);
1305     test_virtual_space_actual_committed_space(12 * K, 12 * K);
1306     test_virtual_space_actual_committed_space(64 * K, 0);
1307     test_virtual_space_actual_committed_space(64 * K, 32 * K);
1308     test_virtual_space_actual_committed_space(64 * K, 64 * K);
1309     test_virtual_space_actual_committed_space(2 * M, 0);
1310     test_virtual_space_actual_committed_space(2 * M, 4 * K);
1311     test_virtual_space_actual_committed_space(2 * M, 64 * K);
1312     test_virtual_space_actual_committed_space(2 * M, 1 * M);
1313     test_virtual_space_actual_committed_space(2 * M, 2 * M);
1314     test_virtual_space_actual_committed_space(10 * M, 0);
1315     test_virtual_space_actual_committed_space(10 * M, 4 * K);
1316     test_virtual_space_actual_committed_space(10 * M, 8 * K);
1317     test_virtual_space_actual_committed_space(10 * M, 1 * M);
1318     test_virtual_space_actual_committed_space(10 * M, 2 * M);
1319     test_virtual_space_actual_committed_space(10 * M, 5 * M);
1320     test_virtual_space_actual_committed_space(10 * M, 10 * M);
1321   }
1322 
1323   static void test_virtual_space_disable_large_pages() {
1324     if (!UseLargePages) {
1325       return;
1326     }
    // These test cases verify the committed size when we force VirtualSpace to
    // disable large pages; the Reserve and Commit modes are run for comparison.
1328     test_virtual_space_actual_committed_space(10 * M, 0, Disable);
1329     test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
1330     test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
1331     test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
1332     test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
1333     test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
1334     test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);
1335 
1336     test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
1337     test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
1338     test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
1339     test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
1340     test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
1341     test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
1342     test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);
1343 
1344     test_virtual_space_actual_committed_space(10 * M, 0, Commit);
1345     test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
1346     test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
1347     test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
1348     test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
1349     test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
1350     test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
1351   }
1352 
1353   static void test_virtual_space() {
1354     test_virtual_space_actual_committed_space();
1355     test_virtual_space_actual_committed_space_one_large_page();
1356     test_virtual_space_disable_large_pages();
1357   }
1358 };
1359 
1360 void TestVirtualSpace_test() {
1361   TestVirtualSpace::test_virtual_space();
1362 }
1363 
1364 #endif // PRODUCT
1365 
1366 #endif