1 /*
   2  * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "logging/log.hpp"
  27 #include "memory/resourceArea.hpp"
  28 #include "memory/virtualspace.hpp"
  29 #include "oops/markOop.hpp"
  30 #include "oops/oop.inline.hpp"
  31 #include "services/memTracker.hpp"
  32 
  33 // ReservedSpace
  34 
  35 // Dummy constructor
  36 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
  37     _alignment(0), _special(false), _executable(false) {
  38 }
  39 
  40 ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) {
  41   bool has_preferred_page_size = preferred_page_size != 0;
  42   // Want to use large pages where possible and pad with small pages.
  43   size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
  44   bool large_pages = page_size != (size_t)os::vm_page_size();
  45   size_t alignment;
  46   if (large_pages && has_preferred_page_size) {
  47     alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
  48     // ReservedSpace initialization requires size to be aligned to the given
  49     // alignment. Align the size up.
  50     size = align_up(size, alignment);
  51   } else {
  52     // Don't force the alignment to be large page aligned,
  53     // since that will waste memory.
  54     alignment = os::vm_allocation_granularity();
  55   }
  56   initialize(size, alignment, large_pages, NULL, false);
  57 }
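// An illustrative sketch (not authoritative) of how the constructor above picks
// page size and alignment, assuming a 2M large page size and a 64K allocation
// granularity (both are platform dependent):
//
//   ReservedSpace rs(10*M, 2*M);   // preferred_page_size = 2M
//     // page_size  = 2M (preferred), large_pages = true
//     // alignment  = MAX2(2M, 64K) = 2M
//     // size       = align_up(10*M, 2M) = 10M
//
//   ReservedSpace rs2(10*M);       // no preferred page size
//     // page_size chosen by os::page_size_for_region_unaligned(10*M, 1)
//     // alignment  = 64K (allocation granularity), size left unchanged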
  58 
  59 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  60                              bool large,
  61                              char* requested_address) {
  62   initialize(size, alignment, large, requested_address, false);
  63 }
  64 
  65 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  66                              bool large,
  67                              bool executable) {
  68   initialize(size, alignment, large, NULL, executable);
  69 }
  70 
  71 // Helper method.
  72 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
  73                                            const size_t size, bool special)
  74 {
  75   if (base == requested_address || requested_address == NULL)
  76     return false; // did not fail
  77 
  78   if (base != NULL) {
  79     // A different reserve address may be acceptable in other cases,
  80     // but for compressed oops the heap should be at the requested address.
  81     assert(UseCompressedOops, "currently requested address used only for compressed oops");
  82     log_debug(gc, heap, coops)("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address));
  83     // OS ignored requested address. Try different address.
  84     if (special) {
  85       if (!os::release_memory_special(base, size)) {
  86         fatal("os::release_memory_special failed");
  87       }
  88     } else {
  89       if (!os::release_memory(base, size)) {
  90         fatal("os::release_memory failed");
  91       }
  92     }
  93   }
  94   return true;
  95 }
  96 
  97 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
  98                                char* requested_address,
  99                                bool executable) {
 100   const size_t granularity = os::vm_allocation_granularity();
 101   assert((size & (granularity - 1)) == 0,
 102          "size not aligned to os::vm_allocation_granularity()");
 103   assert((alignment & (granularity - 1)) == 0,
 104          "alignment not aligned to os::vm_allocation_granularity()");
 105   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 106          "not a power of 2");
 107 
 108   alignment = MAX2(alignment, (size_t)os::vm_page_size());
 109 
 110   _base = NULL;
 111   _size = 0;
 112   _special = false;
 113   _executable = executable;
 114   _alignment = 0;
 115   _noaccess_prefix = 0;
 116   if (size == 0) {
 117     return;
 118   }
 119 
 120   // If OS doesn't support demand paging for large page memory, we need
 121   // to use reserve_memory_special() to reserve and pin the entire region.
 122   bool special = large && !os::can_commit_large_page_memory();
 123   char* base = NULL;
 124 
 125   if (special) {
 126 
 127     base = os::reserve_memory_special(size, alignment, requested_address, executable);
 128 
 129     if (base != NULL) {
 130       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
 131         // OS ignored requested address. Try different address.
 132         return;
 133       }
 134       // Check alignment constraints.
 135       assert((uintptr_t) base % alignment == 0,
 136              "Large pages returned a non-aligned address, base: "
 137              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 138              p2i(base), alignment);
 139       _special = true;
 140     } else {
 141       // failed; try to reserve regular memory below
 142       if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 143                             !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 144         log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 145       }
 146     }
 147   }
 148 
 149   if (base == NULL) {
 150     // Optimistically assume that the OS returns an aligned base pointer.
 151     // When reserving a large address range, most OSes seem to align to at
 152     // least 64K.
 153 
 154     // If the memory was requested at a particular address, use
 155     // os::attempt_reserve_memory_at() to avoid mapping over something
 156     // important.  If available space is not detected, return NULL.
 157 
 158     if (requested_address != 0) {
 159       base = os::attempt_reserve_memory_at(size, requested_address);
 160       if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
 161         // OS ignored requested address. Try different address.
 162         base = NULL;
 163       }
 164     } else {
 165       base = os::reserve_memory(size, NULL, alignment);
 166     }
 167 
 168     if (base == NULL) return;
 169 
 170     // Check alignment constraints
 171     if ((((size_t)base) & (alignment - 1)) != 0) {
 172       // Base not aligned, retry
 173       if (!os::release_memory(base, size)) fatal("os::release_memory failed");
 174       // Make sure that size is aligned
 175       size = align_up(size, alignment);
 176       base = os::reserve_memory_aligned(size, alignment);
 177 
 178       if (requested_address != 0 &&
 179           failed_to_reserve_as_requested(base, requested_address, size, false)) {
 180         // As a result of the alignment constraints, the allocated base differs
 181         // from the requested address. Return to the caller, who can
 182         // take remedial action (like try again without a requested address).
 183         assert(_base == NULL, "should be");
 184         return;
 185       }
 186     }
 187   }
 188   // Done
 189   _base = base;
 190   _size = size;
 191   _alignment = alignment;
 192 }
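// An illustrative fallback path through initialize() above, with made-up numbers:
// a request for 10M with 1M alignment might get back base = 0x7f0000010000,
// which is only 64K aligned.  The code then releases that mapping and calls
// os::reserve_memory_aligned(10M, 1M), which typically over-reserves and trims
// the mapping so that the returned base is 1M aligned.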
 193 
 194 
 195 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
 196                              bool special, bool executable) {
 197   assert((size % os::vm_allocation_granularity()) == 0,
 198          "size not allocation aligned");
 199   _base = base;
 200   _size = size;
 201   _alignment = alignment;
 202   _noaccess_prefix = 0;
 203   _special = special;
 204   _executable = executable;
 205 }
 206 
 207 
 208 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
 209                                         bool split, bool realloc) {
 210   assert(partition_size <= size(), "partition failed");
 211   if (split) {
 212     os::split_reserved_memory(base(), size(), partition_size, realloc);
 213   }
 214   ReservedSpace result(base(), partition_size, alignment, special(),
 215                        executable());
 216   return result;
 217 }
 218 
 219 
 220 ReservedSpace
 221 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
 222   assert(partition_size <= size(), "partition failed");
 223   ReservedSpace result(base() + partition_size, size() - partition_size,
 224                        alignment, special(), executable());
 225   return result;
 226 }
 227 
 228 
 229 size_t ReservedSpace::page_align_size_up(size_t size) {
 230   return align_up(size, os::vm_page_size());
 231 }
 232 
 233 
 234 size_t ReservedSpace::page_align_size_down(size_t size) {
 235   return align_down(size, os::vm_page_size());
 236 }
 237 
 238 
 239 size_t ReservedSpace::allocation_align_size_up(size_t size) {
 240   return align_up(size, os::vm_allocation_granularity());
 241 }
 242 
 243 
 244 size_t ReservedSpace::allocation_align_size_down(size_t size) {
 245   return align_down(size, os::vm_allocation_granularity());
 246 }
 247 
 248 
 249 void ReservedSpace::release() {
 250   if (is_reserved()) {
 251     char *real_base = _base - _noaccess_prefix;
 252     const size_t real_size = _size + _noaccess_prefix;
 253     if (special()) {
 254       os::release_memory_special(real_base, real_size);
 255     } else {
 256       os::release_memory(real_base, real_size);
 257     }
 258     _base = NULL;
 259     _size = 0;
 260     _noaccess_prefix = 0;
 261     _alignment = 0;
 262     _special = false;
 263     _executable = false;
 264   }
 265 }
 266 
 267 static size_t noaccess_prefix_size(size_t alignment) {
 268   return lcm(os::vm_page_size(), alignment);
 269 }
 270 
 271 void ReservedHeapSpace::establish_noaccess_prefix() {
 272   assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
 273   _noaccess_prefix = noaccess_prefix_size(_alignment);
 274 
 275   if (base() && base() + _size > (char *)OopEncodingHeapMax) {
 276     if (true
 277         WIN64_ONLY(&& !UseLargePages)
 278         AIX_ONLY(&& os::vm_page_size() != 64*K)) {
 279       // Protect memory at the base of the allocated region.
 280       // If special, the page was committed (only matters on windows)
 281       if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
 282         fatal("cannot protect protection page");
 283       }
 284       log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
 285                                  PTR_FORMAT " / " INTX_FORMAT " bytes",
 286                                  p2i(_base),
 287                                  _noaccess_prefix);
 288       assert(Universe::narrow_oop_use_implicit_null_checks() == true, "not initialized?");
 289     } else {
 290       Universe::set_narrow_oop_use_implicit_null_checks(false);
 291     }
 292   }
 293 
 294   _base += _noaccess_prefix;
 295   _size -= _noaccess_prefix;
 296   assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
 297 }
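// An illustrative layout after establish_noaccess_prefix(), with made-up numbers
// (4K page size, 1G heap alignment, reservation ending above OopEncodingHeapMax):
//
//   reserved:  [start ..................................... start + size)
//               |<- noaccess prefix: lcm(4K, 1G) = 1G ->|<---- heap ---->|
//   protected:  the prefix is mapped MEM_PROT_NONE
//   _base      = start + 1G   (still aligned to the 1G heap alignment)
//   _size      = old size - 1G
//
// A field access through a decoded null (which maps to the heap base) then hits
// the protected prefix and faults, which is what enables implicit null checks
// for compressed oops.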
 298 
 299 // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
 300 // Does not check whether the reserved memory actually is at requested_address, as the memory returned
 301 // might still fulfill the wishes of the caller.
 302 // Ensures the memory is aligned to 'alignment'.
 303 // NOTE: If ReservedHeapSpace already points to some reserved memory, it is freed first.
 304 void ReservedHeapSpace::try_reserve_heap(size_t size,
 305                                          size_t alignment,
 306                                          bool large,
 307                                          char* requested_address) {
 308   if (_base != NULL) {
 309     // We tried before, but we didn't like the address delivered.
 310     release();
 311   }
 312 
 313   // If OS doesn't support demand paging for large page memory, we need
 314   // to use reserve_memory_special() to reserve and pin the entire region.
 315   bool special = large && !os::can_commit_large_page_memory();
 316   char* base = NULL;
 317 
 318   log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
 319                              " heap of size " SIZE_FORMAT_HEX,
 320                              p2i(requested_address),
 321                              size);
 322 
 323   if (special) {
 324     base = os::reserve_memory_special(size, alignment, requested_address, false);
 325 
 326     if (base != NULL) {
 327       // Check alignment constraints.
 328       assert((uintptr_t) base % alignment == 0,
 329              "Large pages returned a non-aligned address, base: "
 330              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 331              p2i(base), alignment);
 332       _special = true;
 333     }
 334   }
 335 
 336   if (base == NULL) {
 337     // Failed; try to reserve regular memory below
 338     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 339                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 340       log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 341     }
 342 
 343     // Optimistically assume that the OS returns an aligned base pointer.
 344     // When reserving a large address range, most OSes seem to align to at
 345     // least 64K.
 346 
 347     // If the memory was requested at a particular address, use
 348     // os::attempt_reserve_memory_at() to avoid mapping over something
 349     // important.  If available space is not detected, return NULL.
 350 
 351     if (requested_address != 0) {
 352       base = os::attempt_reserve_memory_at(size, requested_address);
 353     } else {
 354       base = os::reserve_memory(size, NULL, alignment);
 355     }
 356   }
 357   if (base == NULL) { return; }
 358 
 359   // Done
 360   _base = base;
 361   _size = size;
 362   _alignment = alignment;
 363 
 364   // Check alignment constraints
 365   if ((((size_t)base) & (alignment - 1)) != 0) {
 366     // Base not aligned, retry.
 367     release();
 368   }
 369 }
 370 
 371 void ReservedHeapSpace::try_reserve_range(char *highest_start,
 372                                           char *lowest_start,
 373                                           size_t attach_point_alignment,
 374                                           char *aligned_heap_base_min_address,
 375                                           char *upper_bound,
 376                                           size_t size,
 377                                           size_t alignment,
 378                                           bool large) {
 379   const size_t attach_range = highest_start - lowest_start;
 380   // Cap num_attempts at the number of possible attach points.
 381   // At least one attempt is possible even for a zero-sized attach range.
 382   const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
 383   const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);
 384 
 385   const size_t stepsize = (attach_range == 0) ? // Only one try.
 386     (size_t) highest_start : align_up(attach_range / num_attempts_to_try, attach_point_alignment);
 387 
 388   // Try attach points from top to bottom.
 389   char* attach_point = highest_start;
 390   while (attach_point >= lowest_start  &&
 391          attach_point <= highest_start &&  // Avoid wrap around.
 392          ((_base == NULL) ||
 393           (_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
 394     try_reserve_heap(size, alignment, large, attach_point);
 395     attach_point -= stepsize;
 396   }
 397 }
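// An illustrative walk through try_reserve_range(), with made-up numbers:
// lowest_start = 1G, highest_start = 4G, attach_point_alignment = 256M.
//
//   attach_range          = 4G - 1G = 3G
//   num_attempts_possible = 3G / 256M + 1 = 13
//   num_attempts_to_try   = MIN2(HeapSearchSteps, 13)    // say 3
//   stepsize              = align_up(3G / 3, 256M) = 1G
//
// The loop then asks try_reserve_heap() for memory at 4G, 3G, 2G and 1G (top
// to bottom), stopping early once a reservation satisfies the caller's bounds.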
 398 
 399 #define SIZE_64K  ((uint64_t) UCONST64(      0x10000))
 400 #define SIZE_256M ((uint64_t) UCONST64(   0x10000000))
 401 #define SIZE_32G  ((uint64_t) UCONST64(  0x800000000))
 402 
 403 // Helper for heap allocation. Returns an array with addresses
 404 // (OS-specific) which are suited for disjoint base mode. Array is
 405 // NULL terminated.
 406 static char** get_attach_addresses_for_disjoint_mode() {
 407   static uint64_t addresses[] = {
 408      2 * SIZE_32G,
 409      3 * SIZE_32G,
 410      4 * SIZE_32G,
 411      8 * SIZE_32G,
 412     10 * SIZE_32G,
 413      1 * SIZE_64K * SIZE_32G,
 414      2 * SIZE_64K * SIZE_32G,
 415      3 * SIZE_64K * SIZE_32G,
 416      4 * SIZE_64K * SIZE_32G,
 417     16 * SIZE_64K * SIZE_32G,
 418     32 * SIZE_64K * SIZE_32G,
 419     34 * SIZE_64K * SIZE_32G,
 420     0
 421   };
 422 
 423   // Skip addresses smaller than OopEncodingHeapMax or HeapBaseMinAddress.
 424   // This assumes the array is sorted in ascending order.
 425   uint i = 0;
 426   while (addresses[i] != 0 &&
 427          (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
 428     i++;
 429   }
 430   uint start = i;
 431 
 432   // Avoid more steps than requested.
 433   i = 0;
 434   while (addresses[start+i] != 0) {
 435     if (i == HeapSearchSteps) {
 436       addresses[start+i] = 0;
 437       break;
 438     }
 439     i++;
 440   }
 441 
 442   return (char**) &addresses[start];
 443 }
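// Illustrative example: with the default 8-byte object alignment,
// OopEncodingHeapMax is 32G, so none of the entries above are filtered out and
// only the HeapSearchSteps cut-off applies.  If HeapBaseMinAddress were set to,
// say, 100G, the 64G and 96G entries would be skipped and the returned array
// would start at 128G.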
 444 
 445 void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, bool large) {
 446   guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
 447             "can not allocate compressed oop heap for this size");
 448   guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
 449   assert(HeapBaseMinAddress > 0, "sanity");
 450 
 451   const size_t granularity = os::vm_allocation_granularity();
 452   assert((size & (granularity - 1)) == 0,
 453          "size not aligned to os::vm_allocation_granularity()");
 454   assert((alignment & (granularity - 1)) == 0,
 455          "alignment not aligned to os::vm_allocation_granularity()");
 456   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 457          "not a power of 2");
 458 
 459   // The necessary attach point alignment for generated wish addresses.
 460   // This is needed to increase the chance of attaching for mmap and shmat.
 461   const size_t os_attach_point_alignment =
 462     AIX_ONLY(SIZE_256M)  // Known shm boundary alignment.
 463     NOT_AIX(os::vm_allocation_granularity());
 464   const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
 465 
 466   char *aligned_heap_base_min_address = (char *)align_up((void *)HeapBaseMinAddress, alignment);
 467   size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
 468     noaccess_prefix_size(alignment) : 0;
 469 
 470   // Attempt to alloc at user-given address.
 471   if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
 472     try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_heap_base_min_address);
 473     if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
 474       release();
 475     }
 476   }
 477 
 478   // Keep heap at HeapBaseMinAddress.
 479   if (_base == NULL) {
 480 
 481     // Try to allocate the heap at addresses that allow efficient oop compression.
 482     // Different schemes are tried, in order of decreasing optimization potential.
 483     //
 484     // For this, try_reserve_heap() is called with the desired heap base addresses.
 485     // A call into the os layer to allocate at a given address can return memory
 486     // at a different address than requested.  Still, this might be memory at a useful
 487     // address. try_reserve_heap() always keeps the allocated memory, since only
 488     // here are the criteria for a good heap checked.
 489 
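    // A rough sketch of the modes tried below, assuming the default 8-byte
    // object alignment (the exact limits scale with ObjectAlignmentInBytes):
    //
    //   heap ends <= 4G      : "unscaled"   oop = narrow_oop            (no base, no shift)
    //   heap ends <= 32G     : "zerobased"  oop = narrow_oop << shift   (no base)
    //   base aligned to 32G  : "disjoint"   base bits and shifted oop bits do not overlap
    //   otherwise            : "heap-based" oop = base + (narrow_oop << shift)
    //
    // All modes with a non-NULL base need a noaccess prefix at the heap base
    // for implicit null checks, which is why noaccess_prefix is added to the
    // requested size further down.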
 490     // Attempt to allocate so that we can run without base and scale (32-bit unscaled compressed oops).
 491     // Give it several tries from top of range to bottom.
 492     if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {
 493 
 494       // Calculate the address range within which we try to attach (range of possible start addresses).
 495       char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
 496       char* const lowest_start  = align_up(aligned_heap_base_min_address, attach_point_alignment);
 497       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 498                         aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
 499     }
 500 
 501     // zerobased: Attempt to allocate in the lower 32G.
 502     // But leave room for the compressed class space, which is allocated above
 503     // the heap.
 504     char *zerobased_max = (char *)OopEncodingHeapMax;
 505     const size_t class_space = align_up(CompressedClassSpaceSize, alignment);
 506     // For small heaps, save some space for compressed class pointer
 507     // space so it can be decoded with no base.
 508     if (UseCompressedClassPointers && !UseSharedSpaces &&
 509         OopEncodingHeapMax <= KlassEncodingMetaspaceMax &&
 510         (uint64_t)(aligned_heap_base_min_address + size + class_space) <= KlassEncodingMetaspaceMax) {
 511       zerobased_max = (char *)OopEncodingHeapMax - class_space;
 512     }
 513 
 514     // Give it several tries from top of range to bottom.
 515     if (aligned_heap_base_min_address + size <= zerobased_max &&    // Zerobased theoretically possible.
 516         ((_base == NULL) ||                        // No previous try succeeded.
 517          (_base + size > zerobased_max))) {        // Unscaled delivered an arbitrary address.
 518 
 519       // Calculate the address range within which we try to attach (range of possible start addresses).
 520       char *const highest_start = align_down(zerobased_max - size, attach_point_alignment);
 521       // Need to be careful about size being guaranteed to be less
 522       // than UnscaledOopHeapMax due to type constraints.
 523       char *lowest_start = aligned_heap_base_min_address;
 524       uint64_t unscaled_end = UnscaledOopHeapMax - size;
 525       if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
 526         lowest_start = MAX2(lowest_start, (char*)unscaled_end);
 527       }
 528       lowest_start = align_up(lowest_start, attach_point_alignment);
 529       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 530                         aligned_heap_base_min_address, zerobased_max, size, alignment, large);
 531     }
 532 
 533     // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
 534     // implement null checks.
 535     noaccess_prefix = noaccess_prefix_size(alignment);
 536 
 537     // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjointbase mode.
 538     char** addresses = get_attach_addresses_for_disjoint_mode();
 539     int i = 0;
 540     while (addresses[i] &&                                 // End of array not yet reached.
 541            ((_base == NULL) ||                             // No previous try succeeded.
 542             (_base + size >  (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
 543              !Universe::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
 544       char* const attach_point = addresses[i];
 545       assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
 546       try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
 547       i++;
 548     }
 549 
 550     // Last, desperate try without any placement.
 551     if (_base == NULL) {
 552       log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix);
 553       initialize(size + noaccess_prefix, alignment, large, NULL, false);
 554     }
 555   }
 556 }
 557 
 558 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large) : ReservedSpace() {
 559 
 560   if (size == 0) {
 561     return;
 562   }
 563 
 564   // Heap size should be aligned to alignment, too.
 565   guarantee(is_aligned(size, alignment), "set by caller");
 566 
 567   if (UseCompressedOops) {
 568     initialize_compressed_heap(size, alignment, large);
 569     if (_size > size) {
 570       // We allocated the heap with a noaccess prefix.
 571       // It can happen that we get a zerobased/unscaled heap with a noaccess prefix
 572       // if we had to try at an arbitrary address.
 573       establish_noaccess_prefix();
 574     }
 575   } else {
 576     initialize(size, alignment, large, NULL, false);
 577   }
 578 
 579   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
 580          "area must be distinguishable from marks for mark-sweep");
 581   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
 582          "area must be distinguishable from marks for mark-sweep");
 583 
 584   if (base() > 0) {
 585     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
 586   }
 587 }
 588 
 589 // Reserve space for the code segment.  Same as the Java heap, only we mark this
 590 // as executable.
 591 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
 592                                      size_t rs_align,
 593                                      bool large) :
 594   ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
 595   MemTracker::record_virtual_memory_type((address)base(), mtCode);
 596 }
 597 
 598 // VirtualSpace
 599 
 600 VirtualSpace::VirtualSpace() {
 601   _low_boundary           = NULL;
 602   _high_boundary          = NULL;
 603   _low                    = NULL;
 604   _high                   = NULL;
 605   _lower_high             = NULL;
 606   _middle_high            = NULL;
 607   _upper_high             = NULL;
 608   _lower_high_boundary    = NULL;
 609   _middle_high_boundary   = NULL;
 610   _upper_high_boundary    = NULL;
 611   _lower_alignment        = 0;
 612   _middle_alignment       = 0;
 613   _upper_alignment        = 0;
 614   _special                = false;
 615   _executable             = false;
 616 }
 617 
 618 
 619 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
 620   const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
 621   return initialize_with_granularity(rs, committed_size, max_commit_granularity);
 622 }
 623 
 624 bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
 625   if(!rs.is_reserved()) return false;  // allocation failed.
 626   assert(_low_boundary == NULL, "VirtualSpace already initialized");
 627   assert(max_commit_granularity > 0, "Granularity must be non-zero.");
 628 
 629   _low_boundary  = rs.base();
 630   _high_boundary = low_boundary() + rs.size();
 631 
 632   _low = low_boundary();
 633   _high = low();
 634 
 635   _special = rs.special();
 636   _executable = rs.executable();
 637 
 638   // When a VirtualSpace begins life at a large size, make all future expansion
 639   // and shrinking occur aligned to a granularity of large pages.  This avoids
 640   // fragmentation of physical addresses that inhibits the use of large pages
 641   // by the OS virtual memory system.  Empirically,  we see that with a 4MB
 642   // page size, the only spaces that get handled this way are codecache and
 643   // the heap itself, both of which provide a substantial performance
 644   // boost in many benchmarks when covered by large pages.
 645   //
 646   // No attempt is made to force large page alignment at the very top and
 647   // bottom of the space if they are not aligned so already.
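  //
  // A sketch of the resulting three regions, with made-up numbers (4K small
  // pages, 2M commit granularity):
  //
  //   low_boundary     lower_high_boundary      middle_high_boundary     high_boundary
  //        |  lower (4K aligned)  |   middle (2M aligned)   |  upper (4K aligned)  |
  //
  // where lower_high_boundary is low_boundary aligned up to 2M and
  // middle_high_boundary is high_boundary aligned down to 2M.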
 648   _lower_alignment  = os::vm_page_size();
 649   _middle_alignment = max_commit_granularity;
 650   _upper_alignment  = os::vm_page_size();
 651 
 652   // End of each region
 653   _lower_high_boundary = align_up(low_boundary(), middle_alignment());
 654   _middle_high_boundary = align_down(high_boundary(), middle_alignment());
 655   _upper_high_boundary = high_boundary();
 656 
 657   // High address of each region
 658   _lower_high = low_boundary();
 659   _middle_high = lower_high_boundary();
 660   _upper_high = middle_high_boundary();
 661 
 662   // commit to initial size
 663   if (committed_size > 0) {
 664     if (!expand_by(committed_size)) {
 665       return false;
 666     }
 667   }
 668   return true;
 669 }
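// A minimal usage sketch, mirroring the unit tests at the end of this file:
//
//   ReservedSpace rs(align_up(10*M, os::vm_allocation_granularity()));
//   VirtualSpace vs;
//   if (rs.is_reserved() && vs.initialize(rs, 0 /* committed */)) {
//     vs.expand_by(2*M, false);   // commit
//     vs.shrink_by(1*M);          // uncommit
//   }
//   rs.release();  // VirtualSpace::release() does not unreserve the memory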
 670 
 671 
 672 VirtualSpace::~VirtualSpace() {
 673   release();
 674 }
 675 
 676 
 677 void VirtualSpace::release() {
 678   // This does not release the memory that was reserved.
 679   // The caller must release it via rs.release().
 680   _low_boundary           = NULL;
 681   _high_boundary          = NULL;
 682   _low                    = NULL;
 683   _high                   = NULL;
 684   _lower_high             = NULL;
 685   _middle_high            = NULL;
 686   _upper_high             = NULL;
 687   _lower_high_boundary    = NULL;
 688   _middle_high_boundary   = NULL;
 689   _upper_high_boundary    = NULL;
 690   _lower_alignment        = 0;
 691   _middle_alignment       = 0;
 692   _upper_alignment        = 0;
 693   _special                = false;
 694   _executable             = false;
 695 }
 696 
 697 
 698 size_t VirtualSpace::committed_size() const {
 699   return pointer_delta(high(), low(), sizeof(char));
 700 }
 701 
 702 
 703 size_t VirtualSpace::reserved_size() const {
 704   return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
 705 }
 706 
 707 
 708 size_t VirtualSpace::uncommitted_size()  const {
 709   return reserved_size() - committed_size();
 710 }
 711 
 712 size_t VirtualSpace::actual_committed_size() const {
 713   // Special VirtualSpaces commit all reserved space up front.
 714   if (special()) {
 715     return reserved_size();
 716   }
 717 
 718   size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
 719   size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
 720   size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));
 721 
 722 #ifdef ASSERT
 723   size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
 724   size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
 725   size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));
 726 
 727   if (committed_high > 0) {
 728     assert(committed_low == lower, "Must be");
 729     assert(committed_middle == middle, "Must be");
 730   }
 731 
 732   if (committed_middle > 0) {
 733     assert(committed_low == lower, "Must be");
 734   }
 735   if (committed_middle < middle) {
 736     assert(committed_high == 0, "Must be");
 737   }
 738 
 739   if (committed_low < lower) {
 740     assert(committed_high == 0, "Must be");
 741     assert(committed_middle == 0, "Must be");
 742   }
 743 #endif
 744 
 745   return committed_low + committed_middle + committed_high;
 746 }
 747 
 748 
 749 bool VirtualSpace::contains(const void* p) const {
 750   return low() <= (const char*) p && (const char*) p < high();
 751 }
 752 
 753 static void pretouch_expanded_memory(void* start, void* end) {
 754   assert(is_aligned(start, os::vm_page_size()), "Unexpected alignment");
 755   assert(is_aligned(end,   os::vm_page_size()), "Unexpected alignment");
 756 
 757   os::pretouch_memory(start, end);
 758 }
 759 
 760 static bool commit_expanded(char* start, size_t size, size_t alignment, bool pre_touch, bool executable) {
 761   if (os::commit_memory(start, size, alignment, executable)) {
 762     if (pre_touch || AlwaysPreTouch) {
 763       pretouch_expanded_memory(start, start + size);
 764     }
 765     return true;
 766   }
 767 
 768   debug_only(warning(
 769       "INFO: os::commit_memory(" PTR_FORMAT ", " PTR_FORMAT
 770       " size=" SIZE_FORMAT ", executable=%d) failed",
 771       p2i(start), p2i(start + size), size, executable);)
 772 
 773   return false;
 774 }
 775 
 776 /*
 777    First we need to determine if a particular virtual space is using large
 778    pages.  This is done in the initialize function, and only virtual spaces
 779    that are larger than LargePageSizeInBytes use large pages.  Once we
 780    have determined this, all expand_by and shrink_by calls must grow and
 781    shrink by large page size chunks.  If a particular request
 782    is within the current large page, the call to commit and uncommit memory
 783    can be ignored.  In the case that the low and high boundaries of this
 784    space are not large page aligned, the pages leading up to the first large
 785    page address and the pages after the last large page address must be
 786    allocated with default pages.
 787 */
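// An illustrative expand_by() walk, with made-up numbers: assume the space
// starts 2M aligned (so the lower region is empty), the middle alignment is 2M,
// and nothing is committed yet.  A call to expand_by(3*M) computes
//
//   aligned_middle_new_high = align_up(low() + 3*M, 2M) = low() + 4M
//
// so 4M of the middle region get committed (whole large-page chunks), while
// high() itself only advances by the requested 3M.  A later expand_by(1*M)
// then needs no additional commit for the middle region.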
 788 bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
 789   if (uncommitted_size() < bytes) {
 790     return false;
 791   }
 792 
 793   if (special()) {
 794     // don't commit memory if the entire space is pinned in memory
 795     _high += bytes;
 796     return true;
 797   }
 798 
 799   char* previous_high = high();
 800   char* unaligned_new_high = high() + bytes;
 801   assert(unaligned_new_high <= high_boundary(), "cannot expand by more than upper boundary");
 802 
 803   // Calculate where the new high for each of the regions should be.  If
 804   // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
 805   // then the unaligned lower and upper new highs would be the
 806   // lower_high() and upper_high() respectively.
 807   char* unaligned_lower_new_high =  MIN2(unaligned_new_high, lower_high_boundary());
 808   char* unaligned_middle_new_high = MIN2(unaligned_new_high, middle_high_boundary());
 809   char* unaligned_upper_new_high =  MIN2(unaligned_new_high, upper_high_boundary());
 810 
 811   // Align the new highs based on each region's alignment.  The lower and upper
 812   // alignments will always be the default page size.  The middle alignment will be
 813   // LargePageSizeInBytes if the actual size of the virtual space is in
 814   // fact larger than LargePageSizeInBytes.
 815   char* aligned_lower_new_high =  align_up(unaligned_lower_new_high, lower_alignment());
 816   char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
 817   char* aligned_upper_new_high =  align_up(unaligned_upper_new_high, upper_alignment());
 818 
 819   // Determine which regions need to grow in this expand_by call.
 820   // If you are growing in the lower region, high() must be in that
 821   // region so calculate the size based on high().  For the middle and
 822   // upper regions, determine the starting point of growth based on the
 823   // location of high().  By getting the MAX of the region's low address
 824   // (or the previous region's high address) and high(), we can tell if it
 825   // is an intra or inter region growth.
 826   size_t lower_needs = 0;
 827   if (aligned_lower_new_high > lower_high()) {
 828     lower_needs = pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
 829   }
 830   size_t middle_needs = 0;
 831   if (aligned_middle_new_high > middle_high()) {
 832     middle_needs = pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
 833   }
 834   size_t upper_needs = 0;
 835   if (aligned_upper_new_high > upper_high()) {
 836     upper_needs = pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
 837   }
 838 
 839   // Check contiguity.
 840   assert(low_boundary() <= lower_high() && lower_high() <= lower_high_boundary(),
 841          "high address must be contained within the region");
 842   assert(lower_high_boundary() <= middle_high() && middle_high() <= middle_high_boundary(),
 843          "high address must be contained within the region");
 844   assert(middle_high_boundary() <= upper_high() && upper_high() <= upper_high_boundary(),
 845          "high address must be contained within the region");
 846 
 847   // Commit regions
 848   if (lower_needs > 0) {
 849     assert(lower_high() + lower_needs <= lower_high_boundary(), "must not expand beyond region");
 850     if (!commit_expanded(lower_high(), lower_needs, _lower_alignment, pre_touch, _executable)) {
 851       return false;
 852     }
 853     _lower_high += lower_needs;
 854   }
 855 
 856   if (middle_needs > 0) {
 857     assert(middle_high() + middle_needs <= middle_high_boundary(), "must not expand beyond region");
 858     if (!commit_expanded(middle_high(), middle_needs, _middle_alignment, pre_touch, _executable)) {
 859       return false;
 860     }
 861     _middle_high += middle_needs;
 862   }
 863 
 864   if (upper_needs > 0) {
 865     assert(upper_high() + upper_needs <= upper_high_boundary(), "must not expand beyond region");
 866     if (!commit_expanded(upper_high(), upper_needs, _upper_alignment, pre_touch, _executable)) {
 867       return false;
 868     }
 869     _upper_high += upper_needs;
 870   }
 871 
 872   _high += bytes;
 873   return true;
 874 }
 875 
 876 // A page is uncommitted if the contents of the entire page are deemed unusable.
 877 // Continue to decrement the high() pointer until it reaches a page boundary,
 878 // in which case that particular page can now be uncommitted.
 879 void VirtualSpace::shrink_by(size_t size) {
 880   if (committed_size() < size)
 881     fatal("Cannot shrink virtual space to negative size");
 882 
 883   if (special()) {
 884     // don't uncommit if the entire space is pinned in memory
 885     _high -= size;
 886     return;
 887   }
 888 
 889   char* unaligned_new_high = high() - size;
 890   assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");
 891 
 892   // Calculate new unaligned address
 893   char* unaligned_upper_new_high =
 894     MAX2(unaligned_new_high, middle_high_boundary());
 895   char* unaligned_middle_new_high =
 896     MAX2(unaligned_new_high, lower_high_boundary());
 897   char* unaligned_lower_new_high =
 898     MAX2(unaligned_new_high, low_boundary());
 899 
 900   // Align address to region's alignment
 901   char* aligned_upper_new_high =  align_up(unaligned_upper_new_high, upper_alignment());
 902   char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
 903   char* aligned_lower_new_high =  align_up(unaligned_lower_new_high, lower_alignment());
 904 
 905   // Determine which regions need to shrink
 906   size_t upper_needs = 0;
 907   if (aligned_upper_new_high < upper_high()) {
 908     upper_needs =
 909       pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
 910   }
 911   size_t middle_needs = 0;
 912   if (aligned_middle_new_high < middle_high()) {
 913     middle_needs =
 914       pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
 915   }
 916   size_t lower_needs = 0;
 917   if (aligned_lower_new_high < lower_high()) {
 918     lower_needs =
 919       pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
 920   }
 921 
 922   // Check contiguity.
 923   assert(middle_high_boundary() <= upper_high() &&
 924          upper_high() <= upper_high_boundary(),
 925          "high address must be contained within the region");
 926   assert(lower_high_boundary() <= middle_high() &&
 927          middle_high() <= middle_high_boundary(),
 928          "high address must be contained within the region");
 929   assert(low_boundary() <= lower_high() &&
 930          lower_high() <= lower_high_boundary(),
 931          "high address must be contained within the region");
 932 
 933   // Uncommit
 934   if (upper_needs > 0) {
 935     assert(middle_high_boundary() <= aligned_upper_new_high &&
 936            aligned_upper_new_high + upper_needs <= upper_high_boundary(),
 937            "must not shrink beyond region");
 938     if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
 939       debug_only(warning("os::uncommit_memory failed"));
 940       return;
 941     } else {
 942       _upper_high -= upper_needs;
 943     }
 944   }
 945   if (middle_needs > 0) {
 946     assert(lower_high_boundary() <= aligned_middle_new_high &&
 947            aligned_middle_new_high + middle_needs <= middle_high_boundary(),
 948            "must not shrink beyond region");
 949     if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
 950       debug_only(warning("os::uncommit_memory failed"));
 951       return;
 952     } else {
 953       _middle_high -= middle_needs;
 954     }
 955   }
 956   if (lower_needs > 0) {
 957     assert(low_boundary() <= aligned_lower_new_high &&
 958            aligned_lower_new_high + lower_needs <= lower_high_boundary(),
 959            "must not shrink beyond region");
 960     if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
 961       debug_only(warning("os::uncommit_memory failed"));
 962       return;
 963     } else {
 964       _lower_high -= lower_needs;
 965     }
 966   }
 967 
 968   _high -= size;
 969 }
 970 
 971 #ifndef PRODUCT
 972 void VirtualSpace::check_for_contiguity() {
 973   // Check contiguity.
 974   assert(low_boundary() <= lower_high() &&
 975          lower_high() <= lower_high_boundary(),
 976          "high address must be contained within the region");
 977   assert(lower_high_boundary() <= middle_high() &&
 978          middle_high() <= middle_high_boundary(),
 979          "high address must be contained within the region");
 980   assert(middle_high_boundary() <= upper_high() &&
 981          upper_high() <= upper_high_boundary(),
 982          "high address must be contained within the region");
 983   assert(low() >= low_boundary(), "low");
 984   assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
 985   assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
 986   assert(high() <= upper_high(), "upper high");
 987 }
 988 
 989 void VirtualSpace::print_on(outputStream* out) {
 990   out->print   ("Virtual space:");
 991   if (special()) out->print(" (pinned in memory)");
 992   out->cr();
 993   out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
 994   out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
 995   out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low()), p2i(high()));
 996   out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low_boundary()), p2i(high_boundary()));
 997 }
 998 
 999 void VirtualSpace::print() {
1000   print_on(tty);
1001 }
1002 
1003 /////////////// Unit tests ///////////////
1004 
1005 #ifndef PRODUCT
1006 
1007 #define test_log(...) \
1008   do {\
1009     if (VerboseInternalVMTests) { \
1010       tty->print_cr(__VA_ARGS__); \
1011       tty->flush(); \
1012     }\
1013   } while (false)
1014 
1015 class TestReservedSpace : AllStatic {
1016  public:
1017   static void small_page_write(void* addr, size_t size) {
1018     size_t page_size = os::vm_page_size();
1019 
1020     char* end = (char*)addr + size;
1021     for (char* p = (char*)addr; p < end; p += page_size) {
1022       *p = 1;
1023     }
1024   }
1025 
1026   static void release_memory_for_test(ReservedSpace rs) {
1027     if (rs.special()) {
1028       guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
1029     } else {
1030       guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
1031     }
1032   }
1033 
1034   static void test_reserved_space1(size_t size, size_t alignment) {
1035     test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
1036 
1037     assert(is_aligned(size, alignment), "Incorrect input parameters");
1038 
1039     ReservedSpace rs(size,          // size
1040                      alignment,     // alignment
1041                      UseLargePages, // large
1042                      (char *)NULL); // requested_address
1043 
1044     test_log(" rs.special() == %d", rs.special());
1045 
1046     assert(rs.base() != NULL, "Must be");
1047     assert(rs.size() == size, "Must be");
1048 
1049     assert(is_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
1050     assert(is_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
1051 
1052     if (rs.special()) {
1053       small_page_write(rs.base(), size);
1054     }
1055 
1056     release_memory_for_test(rs);
1057   }
1058 
1059   static void test_reserved_space2(size_t size) {
1060     test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
1061 
1062     assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1063 
1064     ReservedSpace rs(size);
1065 
1066     test_log(" rs.special() == %d", rs.special());
1067 
1068     assert(rs.base() != NULL, "Must be");
1069     assert(rs.size() == size, "Must be");
1070 
1071     if (rs.special()) {
1072       small_page_write(rs.base(), size);
1073     }
1074 
1075     release_memory_for_test(rs);
1076   }
1077 
1078   static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
1079     test_log("test_reserved_space3(%p, %p, %d)",
1080         (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);
1081 
1082     if (size < alignment) {
1083       // Tests might set -XX:LargePageSizeInBytes=<small pages> and cause unexpected input arguments for this test.
1084       assert((size_t)os::vm_page_size() == os::large_page_size(), "Test needs further refinement");
1085       return;
1086     }
1087 
1088     assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1089     assert(is_aligned(size, alignment), "Must be at least aligned against alignment");
1090 
1091     bool large = maybe_large && UseLargePages && size >= os::large_page_size();
1092 
1093     ReservedSpace rs(size, alignment, large, false);
1094 
1095     test_log(" rs.special() == %d", rs.special());
1096 
1097     assert(rs.base() != NULL, "Must be");
1098     assert(rs.size() == size, "Must be");
1099 
1100     if (rs.special()) {
1101       small_page_write(rs.base(), size);
1102     }
1103 
1104     release_memory_for_test(rs);
1105   }
1106 
1107 
1108   static void test_reserved_space1() {
1109     size_t size = 2 * 1024 * 1024;
1110     size_t ag   = os::vm_allocation_granularity();
1111 
1112     test_reserved_space1(size,      ag);
1113     test_reserved_space1(size * 2,  ag);
1114     test_reserved_space1(size * 10, ag);
1115   }
1116 
1117   static void test_reserved_space2() {
1118     size_t size = 2 * 1024 * 1024;
1119     size_t ag = os::vm_allocation_granularity();
1120 
1121     test_reserved_space2(size * 1);
1122     test_reserved_space2(size * 2);
1123     test_reserved_space2(size * 10);
1124     test_reserved_space2(ag);
1125     test_reserved_space2(size - ag);
1126     test_reserved_space2(size);
1127     test_reserved_space2(size + ag);
1128     test_reserved_space2(size * 2);
1129     test_reserved_space2(size * 2 - ag);
1130     test_reserved_space2(size * 2 + ag);
1131     test_reserved_space2(size * 3);
1132     test_reserved_space2(size * 3 - ag);
1133     test_reserved_space2(size * 3 + ag);
1134     test_reserved_space2(size * 10);
1135     test_reserved_space2(size * 10 + size / 2);
1136   }
1137 
1138   static void test_reserved_space3() {
1139     size_t ag = os::vm_allocation_granularity();
1140 
1141     test_reserved_space3(ag,      ag    , false);
1142     test_reserved_space3(ag * 2,  ag    , false);
1143     test_reserved_space3(ag * 3,  ag    , false);
1144     test_reserved_space3(ag * 2,  ag * 2, false);
1145     test_reserved_space3(ag * 4,  ag * 2, false);
1146     test_reserved_space3(ag * 8,  ag * 2, false);
1147     test_reserved_space3(ag * 4,  ag * 4, false);
1148     test_reserved_space3(ag * 8,  ag * 4, false);
1149     test_reserved_space3(ag * 16, ag * 4, false);
1150 
1151     if (UseLargePages) {
1152       size_t lp = os::large_page_size();
1153 
1154       // Without large pages
1155       test_reserved_space3(lp,     ag * 4, false);
1156       test_reserved_space3(lp * 2, ag * 4, false);
1157       test_reserved_space3(lp * 4, ag * 4, false);
1158       test_reserved_space3(lp,     lp    , false);
1159       test_reserved_space3(lp * 2, lp    , false);
1160       test_reserved_space3(lp * 3, lp    , false);
1161       test_reserved_space3(lp * 2, lp * 2, false);
1162       test_reserved_space3(lp * 4, lp * 2, false);
1163       test_reserved_space3(lp * 8, lp * 2, false);
1164 
1165       // With large pages
1166       test_reserved_space3(lp, ag * 4    , true);
1167       test_reserved_space3(lp * 2, ag * 4, true);
1168       test_reserved_space3(lp * 4, ag * 4, true);
1169       test_reserved_space3(lp, lp        , true);
1170       test_reserved_space3(lp * 2, lp    , true);
1171       test_reserved_space3(lp * 3, lp    , true);
1172       test_reserved_space3(lp * 2, lp * 2, true);
1173       test_reserved_space3(lp * 4, lp * 2, true);
1174       test_reserved_space3(lp * 8, lp * 2, true);
1175     }
1176   }
1177 
1178   static void test_reserved_space() {
1179     test_reserved_space1();
1180     test_reserved_space2();
1181     test_reserved_space3();
1182   }
1183 };
1184 
1185 void TestReservedSpace_test() {
1186   TestReservedSpace::test_reserved_space();
1187 }
1188 
1189 #define assert_equals(actual, expected)  \
1190   assert(actual == expected,             \
1191          "Got " SIZE_FORMAT " expected " \
1192          SIZE_FORMAT, actual, expected);
1193 
1194 #define assert_ge(value1, value2)                  \
1195   assert(value1 >= value2,                         \
1196          "'" #value1 "': " SIZE_FORMAT " '"        \
1197          #value2 "': " SIZE_FORMAT, value1, value2);
1198 
1199 #define assert_lt(value1, value2)                  \
1200   assert(value1 < value2,                          \
1201          "'" #value1 "': " SIZE_FORMAT " '"        \
1202          #value2 "': " SIZE_FORMAT, value1, value2);
1203 
1204 
1205 class TestVirtualSpace : AllStatic {
1206   enum TestLargePages {
1207     Default,
1208     Disable,
1209     Reserve,
1210     Commit
1211   };
1212 
1213   static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
1214     switch(mode) {
1215     default:
1216     case Default:
1217     case Reserve:
1218       return ReservedSpace(reserve_size_aligned);
1219     case Disable:
1220     case Commit:
1221       return ReservedSpace(reserve_size_aligned,
1222                            os::vm_allocation_granularity(),
1223                            /* large */ false, /* exec */ false);
1224     }
1225   }
1226 
1227   static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
1228     switch(mode) {
1229     default:
1230     case Default:
1231     case Reserve:
1232       return vs.initialize(rs, 0);
1233     case Disable:
1234       return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
1235     case Commit:
1236       return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
1237     }
1238   }
1239 
1240  public:
1241   static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
1242                                                         TestLargePages mode = Default) {
1243     size_t granularity = os::vm_allocation_granularity();
1244     size_t reserve_size_aligned = align_up(reserve_size, granularity);
1245 
1246     ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
1247 
1248     assert(reserved.is_reserved(), "Must be");
1249 
1250     VirtualSpace vs;
1251     bool initialized = initialize_virtual_space(vs, reserved, mode);
1252     assert(initialized, "Failed to initialize VirtualSpace");
1253 
1254     vs.expand_by(commit_size, false);
1255 
1256     if (vs.special()) {
1257       assert_equals(vs.actual_committed_size(), reserve_size_aligned);
1258     } else {
1259       assert_ge(vs.actual_committed_size(), commit_size);
1260       // Approximate the commit granularity.
1261       // Make sure that we don't commit using large pages
1262       // if large pages have been disabled for this VirtualSpace.
1263       size_t commit_granularity = (mode == Disable || !UseLargePages) ?
1264                                    os::vm_page_size() : os::large_page_size();
1265       assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
1266     }
1267 
1268     reserved.release();
1269   }
1270 
1271   static void test_virtual_space_actual_committed_space_one_large_page() {
1272     if (!UseLargePages) {
1273       return;
1274     }
1275 
1276     size_t large_page_size = os::large_page_size();
1277 
1278     ReservedSpace reserved(large_page_size, large_page_size, true, false);
1279 
1280     assert(reserved.is_reserved(), "Must be");
1281 
1282     VirtualSpace vs;
1283     bool initialized = vs.initialize(reserved, 0);
1284     assert(initialized, "Failed to initialize VirtualSpace");
1285 
1286     vs.expand_by(large_page_size, false);
1287 
1288     assert_equals(vs.actual_committed_size(), large_page_size);
1289 
1290     reserved.release();
1291   }
1292 
1293   static void test_virtual_space_actual_committed_space() {
1294     test_virtual_space_actual_committed_space(4 * K, 0);
1295     test_virtual_space_actual_committed_space(4 * K, 4 * K);
1296     test_virtual_space_actual_committed_space(8 * K, 0);
1297     test_virtual_space_actual_committed_space(8 * K, 4 * K);
1298     test_virtual_space_actual_committed_space(8 * K, 8 * K);
1299     test_virtual_space_actual_committed_space(12 * K, 0);
1300     test_virtual_space_actual_committed_space(12 * K, 4 * K);
1301     test_virtual_space_actual_committed_space(12 * K, 8 * K);
1302     test_virtual_space_actual_committed_space(12 * K, 12 * K);
1303     test_virtual_space_actual_committed_space(64 * K, 0);
1304     test_virtual_space_actual_committed_space(64 * K, 32 * K);
1305     test_virtual_space_actual_committed_space(64 * K, 64 * K);
1306     test_virtual_space_actual_committed_space(2 * M, 0);
1307     test_virtual_space_actual_committed_space(2 * M, 4 * K);
1308     test_virtual_space_actual_committed_space(2 * M, 64 * K);
1309     test_virtual_space_actual_committed_space(2 * M, 1 * M);
1310     test_virtual_space_actual_committed_space(2 * M, 2 * M);
1311     test_virtual_space_actual_committed_space(10 * M, 0);
1312     test_virtual_space_actual_committed_space(10 * M, 4 * K);
1313     test_virtual_space_actual_committed_space(10 * M, 8 * K);
1314     test_virtual_space_actual_committed_space(10 * M, 1 * M);
1315     test_virtual_space_actual_committed_space(10 * M, 2 * M);
1316     test_virtual_space_actual_committed_space(10 * M, 5 * M);
1317     test_virtual_space_actual_committed_space(10 * M, 10 * M);
1318   }
1319 
1320   static void test_virtual_space_disable_large_pages() {
1321     if (!UseLargePages) {
1322       return;
1323     }
1324     // These test cases verify the actual committed size when we force VirtualSpace to disable large pages, compared with the Reserve and Commit granularity modes.
1325     test_virtual_space_actual_committed_space(10 * M, 0, Disable);
1326     test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
1327     test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
1328     test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
1329     test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
1330     test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
1331     test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);
1332 
1333     test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
1334     test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
1335     test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
1336     test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
1337     test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
1338     test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
1339     test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);
1340 
1341     test_virtual_space_actual_committed_space(10 * M, 0, Commit);
1342     test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
1343     test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
1344     test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
1345     test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
1346     test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
1347     test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
1348   }
1349 
1350   static void test_virtual_space() {
1351     test_virtual_space_actual_committed_space();
1352     test_virtual_space_actual_committed_space_one_large_page();
1353     test_virtual_space_disable_large_pages();
1354   }
1355 };
1356 
1357 void TestVirtualSpace_test() {
1358   TestVirtualSpace::test_virtual_space();
1359 }
1360 
1361 #endif // PRODUCT
1362 
1363 #endif