1 /*
   2  * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "code/codeCacheExtensions.hpp"
  27 #include "logging/log.hpp"
  28 #include "memory/resourceArea.hpp"
  29 #include "memory/virtualspace.hpp"
  30 #include "oops/markOop.hpp"
  31 #include "oops/oop.inline.hpp"
  32 #include "services/memTracker.hpp"
  33 
  34 // ReservedSpace
  35 
  36 // Dummy constructor
  37 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
  38     _alignment(0), _special(false), _executable(false) {
  39 }
  40 
  41 ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) {
  42   bool has_preferred_page_size = preferred_page_size != 0;
  43   // Want to use large pages where possible and pad with small pages.
  44   size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
  45   bool large_pages = page_size != (size_t)os::vm_page_size();
  46   size_t alignment;
  47   if (large_pages && has_preferred_page_size) {
  48     alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
  49     // ReservedSpace initialization requires size to be aligned to the given
  50     // alignment. Align the size up.
  51     size = align_size_up(size, alignment);
  52   } else {
  53     // Don't force the alignment to be large page aligned,
  54     // since that will waste memory.
  55     alignment = os::vm_allocation_granularity();
  56   }
  57   initialize(size, alignment, large_pages, NULL, false);
  58 }
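
// Illustrative sketch (not used by the VM itself), assuming 4K small pages and
// 2M large pages: reserving with a preferred page size rounds the size up to
// the resulting alignment and aligns the reservation accordingly.
//
//   ReservedSpace rs(64*M, 2*M);
//   // rs.size() >= 64*M and, if the reservation succeeded, rs.base() is
//   // aligned to at least 2*M.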
  59 
  60 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  61                              bool large,
  62                              char* requested_address) {
  63   initialize(size, alignment, large, requested_address, false);
  64 }
  65 
  66 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  67                              bool large,
  68                              bool executable) {
  69   initialize(size, alignment, large, NULL, executable);
  70 }
  71 
// Helper method. Returns true if memory could not be reserved at the requested
// address (releasing any reservation that ended up elsewhere); returns false
// if no particular address was requested or the request was satisfied.
  73 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
  74                                            const size_t size, bool special)
  75 {
  76   if (base == requested_address || requested_address == NULL)
  77     return false; // did not fail
  78 
  79   if (base != NULL) {
    // A different reserve address may be acceptable in other cases,
    // but for compressed oops the heap should be at the requested address.
  82     assert(UseCompressedOops, "currently requested address used only for compressed oops");
  83     log_debug(gc, heap, coops)("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address));
  84     // OS ignored requested address. Try different address.
  85     if (special) {
  86       if (!os::release_memory_special(base, size)) {
  87         fatal("os::release_memory_special failed");
  88       }
  89     } else {
  90       if (!os::release_memory(base, size)) {
  91         fatal("os::release_memory failed");
  92       }
  93     }
  94   }
  95   return true;
  96 }
  97 
  98 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
  99                                char* requested_address,
 100                                bool executable) {
 101   const size_t granularity = os::vm_allocation_granularity();
 102   assert((size & (granularity - 1)) == 0,
 103          "size not aligned to os::vm_allocation_granularity()");
 104   assert((alignment & (granularity - 1)) == 0,
 105          "alignment not aligned to os::vm_allocation_granularity()");
 106   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 107          "not a power of 2");
 108 
 109   alignment = MAX2(alignment, (size_t)os::vm_page_size());
 110 
 111   _base = NULL;
 112   _size = 0;
 113   _special = false;
 114   _executable = executable;
 115   _alignment = 0;
 116   _noaccess_prefix = 0;
 117   if (size == 0) {
 118     return;
 119   }
 120 
  // If the OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
 123   bool special = large && !os::can_commit_large_page_memory();
 124   char* base = NULL;
 125 
 126   if (special) {
 127 
 128     base = os::reserve_memory_special(size, alignment, requested_address, executable);
 129 
 130     if (base != NULL) {
 131       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
 132         // OS ignored requested address. Try different address.
 133         return;
 134       }
 135       // Check alignment constraints.
 136       assert((uintptr_t) base % alignment == 0,
 137              "Large pages returned a non-aligned address, base: "
 138              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 139              p2i(base), alignment);
 140       _special = true;
 141     } else {
 142       // failed; try to reserve regular memory below
 143       if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 144                             !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 145         log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 146       }
 147     }
 148   }
 149 
 150   if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid mapping over something
    // important.  If no space is available there, NULL is returned.
 158 
 159     if (requested_address != 0) {
 160       base = os::attempt_reserve_memory_at(size, requested_address);
 161       if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
 162         // OS ignored requested address. Try different address.
 163         base = NULL;
 164       }
 165     } else {
 166       base = os::reserve_memory(size, NULL, alignment);
 167     }
 168 
 169     if (base == NULL) return;
 170 
 171     // Check alignment constraints
 172     if ((((size_t)base) & (alignment - 1)) != 0) {
 173       // Base not aligned, retry
 174       if (!os::release_memory(base, size)) fatal("os::release_memory failed");
 175       // Make sure that size is aligned
 176       size = align_size_up(size, alignment);
 177       base = os::reserve_memory_aligned(size, alignment);
 178 
 179       if (requested_address != 0 &&
 180           failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address. Return to the caller, who can take
        // remedial action (like trying again without a requested address).
 184         assert(_base == NULL, "should be");
 185         return;
 186       }
 187     }
 188   }
 189   // Done
 190   _base = base;
 191   _size = size;
 192   _alignment = alignment;
 193 }
 194 
 195 
 196 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
 197                              bool special, bool executable) {
 198   assert((size % os::vm_allocation_granularity()) == 0,
 199          "size not allocation aligned");
 200   _base = base;
 201   _size = size;
 202   _alignment = alignment;
 203   _noaccess_prefix = 0;
 204   _special = special;
 205   _executable = executable;
 206 }
 207 
 208 
 209 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
 210                                         bool split, bool realloc) {
 211   assert(partition_size <= size(), "partition failed");
 212   if (split) {
 213     os::split_reserved_memory(base(), size(), partition_size, realloc);
 214   }
 215   ReservedSpace result(base(), partition_size, alignment, special(),
 216                        executable());
 217   return result;
 218 }
 219 
 220 
 221 ReservedSpace
 222 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
 223   assert(partition_size <= size(), "partition failed");
 224   ReservedSpace result(base() + partition_size, size() - partition_size,
 225                        alignment, special(), executable());
 226   return result;
 227 }
 228 
 229 
 230 size_t ReservedSpace::page_align_size_up(size_t size) {
 231   return align_size_up(size, os::vm_page_size());
 232 }
 233 
 234 
 235 size_t ReservedSpace::page_align_size_down(size_t size) {
 236   return align_size_down(size, os::vm_page_size());
 237 }
 238 
 239 
 240 size_t ReservedSpace::allocation_align_size_up(size_t size) {
 241   return align_size_up(size, os::vm_allocation_granularity());
 242 }
 243 
 244 
 245 size_t ReservedSpace::allocation_align_size_down(size_t size) {
 246   return align_size_down(size, os::vm_allocation_granularity());
 247 }
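
// For illustration (assuming a 4K page size and a 64K allocation granularity):
//   page_align_size_up(5000)         == 8192
//   page_align_size_down(5000)       == 4096
//   allocation_align_size_up(5000)   == 65536
//   allocation_align_size_down(5000) == 0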
 248 
 249 
 250 void ReservedSpace::release() {
 251   if (is_reserved()) {
 252     char *real_base = _base - _noaccess_prefix;
 253     const size_t real_size = _size + _noaccess_prefix;
 254     if (special()) {
 255       os::release_memory_special(real_base, real_size);
    } else {
 257       os::release_memory(real_base, real_size);
 258     }
 259     _base = NULL;
 260     _size = 0;
 261     _noaccess_prefix = 0;
 262     _alignment = 0;
 263     _special = false;
 264     _executable = false;
 265   }
 266 }
 267 
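// The noaccess prefix must consist of whole protectable pages and must keep the
// heap base aligned after it is skipped, hence the least common multiple of the
// page size and the alignment (e.g. lcm(4K, 4M) == 4M, assuming a 4K page size).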
 268 static size_t noaccess_prefix_size(size_t alignment) {
 269   return lcm(os::vm_page_size(), alignment);
 270 }
 271 
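// Protect the first _noaccess_prefix bytes of the reservation so that decoding
// a compressed null oop (heap base + 0) traps, which lets the VM use implicit
// null checks; afterwards _base and _size are adjusted to exclude the prefix.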
 272 void ReservedHeapSpace::establish_noaccess_prefix() {
 273   assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
 274   _noaccess_prefix = noaccess_prefix_size(_alignment);
 275 
 276   if (base() && base() + _size > (char *)OopEncodingHeapMax) {
 277     if (true
 278         WIN64_ONLY(&& !UseLargePages)
 279         AIX_ONLY(&& os::vm_page_size() != SIZE_64K)) {
 280       // Protect memory at the base of the allocated region.
      // If special, the page was committed (only matters on Windows)
 282       if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
 283         fatal("cannot protect protection page");
 284       }
 285       log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
 286                                  PTR_FORMAT " / " INTX_FORMAT " bytes",
 287                                  p2i(_base),
 288                                  _noaccess_prefix);
 289       assert(Universe::narrow_oop_use_implicit_null_checks() == true, "not initialized?");
 290     } else {
 291       Universe::set_narrow_oop_use_implicit_null_checks(false);
 292     }
 293   }
 294 
 295   _base += _noaccess_prefix;
 296   _size -= _noaccess_prefix;
 297   assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
 298 }
 299 
// Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
// Does not check whether the reserved memory actually is at requested_address, as the memory returned
// might still fulfill the wishes of the caller.
// Ensures the memory is aligned to 'alignment'.
// NOTE: If this ReservedHeapSpace already points to some reserved memory, that memory is freed first.
 305 void ReservedHeapSpace::try_reserve_heap(size_t size,
 306                                          size_t alignment,
 307                                          bool large,
 308                                          char* requested_address) {
 309   if (_base != NULL) {
 310     // We tried before, but we didn't like the address delivered.
 311     release();
 312   }
 313 
  // If the OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
 316   bool special = large && !os::can_commit_large_page_memory();
 317   char* base = NULL;
 318 
 319   log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
 320                              " heap of size " SIZE_FORMAT_HEX,
 321                              p2i(requested_address),
 322                              size);
 323 
 324   if (special) {
 325     base = os::reserve_memory_special(size, alignment, requested_address, false);
 326 
 327     if (base != NULL) {
 328       // Check alignment constraints.
 329       assert((uintptr_t) base % alignment == 0,
 330              "Large pages returned a non-aligned address, base: "
 331              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 332              p2i(base), alignment);
 333       _special = true;
 334     }
 335   }
 336 
 337   if (base == NULL) {
 338     // Failed; try to reserve regular memory below
 339     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 340                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 341       log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 342     }
 343 
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid mapping over something
    // important.  If no space is available there, NULL is returned.
 351 
 352     if (requested_address != 0) {
 353       base = os::attempt_reserve_memory_at(size, requested_address);
 354     } else {
 355       base = os::reserve_memory(size, NULL, alignment);
 356     }
 357   }
 358   if (base == NULL) { return; }
 359 
 360   // Done
 361   _base = base;
 362   _size = size;
 363   _alignment = alignment;
 364 
 365   // Check alignment constraints
 366   if ((((size_t)base) & (alignment - 1)) != 0) {
 367     // Base not aligned, retry.
 368     release();
 369   }
 370 }
 371 
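// Walk attach points from highest_start down to lowest_start in steps of an
// attach_point_alignment-aligned stepsize, calling try_reserve_heap() until a
// reservation fits within [aligned_heap_base_min_address, upper_bound) or the
// attach points are exhausted.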
 372 void ReservedHeapSpace::try_reserve_range(char *highest_start,
 373                                           char *lowest_start,
 374                                           size_t attach_point_alignment,
 375                                           char *aligned_heap_base_min_address,
 376                                           char *upper_bound,
 377                                           size_t size,
 378                                           size_t alignment,
 379                                           bool large) {
 380   const size_t attach_range = highest_start - lowest_start;
  // Cap the number of attempts at the number of possible attach points.
  // At least one attempt is possible even for a zero-sized attach range.
 383   const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
 384   const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);
 385 
 386   const size_t stepsize = (attach_range == 0) ? // Only one try.
 387     (size_t) highest_start : align_size_up(attach_range / num_attempts_to_try, attach_point_alignment);
 388 
 389   // Try attach points from top to bottom.
 390   char* attach_point = highest_start;
 391   while (attach_point >= lowest_start  &&
 392          attach_point <= highest_start &&  // Avoid wrap around.
 393          ((_base == NULL) ||
 394           (_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
 395     try_reserve_heap(size, alignment, large, attach_point);
 396     attach_point -= stepsize;
 397   }
 398 }
 399 
 400 #define SIZE_64K  ((uint64_t) UCONST64(      0x10000))
 401 #define SIZE_256M ((uint64_t) UCONST64(   0x10000000))
 402 #define SIZE_32G  ((uint64_t) UCONST64(  0x800000000))
 403 
// Helper for heap allocation. Returns an array with addresses
// (OS-specific) which are suited for disjoint base mode. The array is
// NULL terminated.
 407 static char** get_attach_addresses_for_disjoint_mode() {
 408   static uint64_t addresses[] = {
 409      2 * SIZE_32G,
 410      3 * SIZE_32G,
 411      4 * SIZE_32G,
 412      8 * SIZE_32G,
 413     10 * SIZE_32G,
 414      1 * SIZE_64K * SIZE_32G,
 415      2 * SIZE_64K * SIZE_32G,
 416      3 * SIZE_64K * SIZE_32G,
 417      4 * SIZE_64K * SIZE_32G,
 418     16 * SIZE_64K * SIZE_32G,
 419     32 * SIZE_64K * SIZE_32G,
 420     34 * SIZE_64K * SIZE_32G,
 421     0
 422   };
 423 
  // Skip addresses smaller than OopEncodingHeapMax or HeapBaseMinAddress.
  // This assumes the array is sorted in ascending order.
 426   uint i = 0;
 427   while (addresses[i] != 0 &&
 428          (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
 429     i++;
 430   }
 431   uint start = i;
 432 
 433   // Avoid more steps than requested.
 434   i = 0;
 435   while (addresses[start+i] != 0) {
 436     if (i == HeapSearchSteps) {
 437       addresses[start+i] = 0;
 438       break;
 439     }
 440     i++;
 441   }
 442 
 443   return (char**) &addresses[start];
 444 }
 445 
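// Reserve a heap suitable for compressed oops. Placement is attempted in order
// of decreasing decode efficiency: unscaled (heap end below 4G), zero-based
// (heap end below 32G), disjoint base, and finally an arbitrary address with a
// noaccess prefix.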
 446 void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, bool large) {
  guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
            "cannot allocate compressed oop heap for this size");
 449   guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
 450   assert(HeapBaseMinAddress > 0, "sanity");
 451 
 452   const size_t granularity = os::vm_allocation_granularity();
 453   assert((size & (granularity - 1)) == 0,
 454          "size not aligned to os::vm_allocation_granularity()");
 455   assert((alignment & (granularity - 1)) == 0,
 456          "alignment not aligned to os::vm_allocation_granularity()");
 457   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 458          "not a power of 2");
 459 
 460   // The necessary attach point alignment for generated wish addresses.
 461   // This is needed to increase the chance of attaching for mmap and shmat.
 462   const size_t os_attach_point_alignment =
 463     AIX_ONLY(SIZE_256M)  // Known shm boundary alignment.
 464     NOT_AIX(os::vm_allocation_granularity());
 465   const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
 466 
 467   char *aligned_heap_base_min_address = (char *)align_ptr_up((void *)HeapBaseMinAddress, alignment);
 468   size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
 469     noaccess_prefix_size(alignment) : 0;
 470 
 471   // Attempt to alloc at user-given address.
 472   if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
 473     try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_heap_base_min_address);
 474     if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
 475       release();
 476     }
 477   }
 478 
 479   // Keep heap at HeapBaseMinAddress.
 480   if (_base == NULL) {
 481 
 482     // Try to allocate the heap at addresses that allow efficient oop compression.
 483     // Different schemes are tried, in order of decreasing optimization potential.
 484     //
 485     // For this, try_reserve_heap() is called with the desired heap base addresses.
 486     // A call into the os layer to allocate at a given address can return memory
 487     // at a different address than requested.  Still, this might be memory at a useful
 488     // address. try_reserve_heap() always returns this allocated memory, as only here
 489     // the criteria for a good heap are checked.
 490 
    // Attempt to allocate so that we can run without base and scale (32-bit unscaled compressed oops).
    // Give it several tries from top of range to bottom.
 493     if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {
 494 
      // Calculate the address range within which we try to attach (range of possible start addresses).
 496       char* const highest_start = (char *)align_ptr_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
 497       char* const lowest_start  = (char *)align_ptr_up(aligned_heap_base_min_address, attach_point_alignment);
 498       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 499                         aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
 500     }
 501 
    // Zerobased: Attempt to allocate in the lower 32G.
    // But leave room for the compressed class pointer space, which is
    // allocated above the heap.
 505     char *zerobased_max = (char *)OopEncodingHeapMax;
 506     const size_t class_space = align_size_up(CompressedClassSpaceSize, alignment);
 507     // For small heaps, save some space for compressed class pointer
 508     // space so it can be decoded with no base.
 509     if (UseCompressedClassPointers && !UseSharedSpaces &&
 510         OopEncodingHeapMax <= KlassEncodingMetaspaceMax &&
 511         (uint64_t)(aligned_heap_base_min_address + size + class_space) <= KlassEncodingMetaspaceMax) {
 512       zerobased_max = (char *)OopEncodingHeapMax - class_space;
 513     }
 514 
 515     // Give it several tries from top of range to bottom.
 516     if (aligned_heap_base_min_address + size <= zerobased_max &&    // Zerobased theoretical possible.
 517         ((_base == NULL) ||                        // No previous try succeeded.
 518          (_base + size > zerobased_max))) {        // Unscaled delivered an arbitrary address.
 519 
      // Calculate the address range within which we try to attach (range of possible start addresses).
 521       char *const highest_start = (char *)align_ptr_down(zerobased_max - size, attach_point_alignment);
 522       // Need to be careful about size being guaranteed to be less
 523       // than UnscaledOopHeapMax due to type constraints.
 524       char *lowest_start = aligned_heap_base_min_address;
 525       uint64_t unscaled_end = UnscaledOopHeapMax - size;
 526       if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
 527         lowest_start = MAX2(lowest_start, (char*)unscaled_end);
 528       }
 529       lowest_start  = (char *)align_ptr_up(lowest_start, attach_point_alignment);
 530       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 531                         aligned_heap_base_min_address, zerobased_max, size, alignment, large);
 532     }
 533 
 534     // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
 535     // implement null checks.
 536     noaccess_prefix = noaccess_prefix_size(alignment);
 537 
 538     // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjointbase mode.
 539     char** addresses = get_attach_addresses_for_disjoint_mode();
 540     int i = 0;
 541     while (addresses[i] &&                                 // End of array not yet reached.
 542            ((_base == NULL) ||                             // No previous try succeeded.
 543             (_base + size >  (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
 544              !Universe::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
 545       char* const attach_point = addresses[i];
 546       assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
 547       try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
 548       i++;
 549     }
 550 
 551     // Last, desperate try without any placement.
 552     if (_base == NULL) {
 553       log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix);
 554       initialize(size + noaccess_prefix, alignment, large, NULL, false);
 555     }
 556   }
 557 }
 558 
 559 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large) : ReservedSpace() {
 560 
 561   if (size == 0) {
 562     return;
 563   }
 564 
 565   // Heap size should be aligned to alignment, too.
 566   guarantee(is_size_aligned(size, alignment), "set by caller");
 567 
 568   if (UseCompressedOops) {
 569     initialize_compressed_heap(size, alignment, large);
 570     if (_size > size) {
      // We allocated the heap with a noaccess prefix.
      // It can happen that we get a zerobased/unscaled heap with a noaccess
      // prefix, if we had to try at an arbitrary address.
 574       establish_noaccess_prefix();
 575     }
 576   } else {
 577     initialize(size, alignment, large, NULL, false);
 578   }
 579 
 580   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
 581          "area must be distinguishable from marks for mark-sweep");
 582   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
 583          "area must be distinguishable from marks for mark-sweep");
 584 
 585   if (base() > 0) {
 586     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
 587   }
 588 }
 589 
// Reserve space for the code segment.  Same as the Java heap, only we mark this
// as executable.
 592 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
 593                                      size_t rs_align,
 594                                      bool large) :
 595   ReservedSpace(r_size, rs_align, large, /*executable*/ CodeCacheExtensions::support_dynamic_code()) {
 596   MemTracker::record_virtual_memory_type((address)base(), mtCode);
 597 }
 598 
 599 // VirtualSpace
 600 
 601 VirtualSpace::VirtualSpace() {
 602   _low_boundary           = NULL;
 603   _high_boundary          = NULL;
 604   _low                    = NULL;
 605   _high                   = NULL;
 606   _lower_high             = NULL;
 607   _middle_high            = NULL;
 608   _upper_high             = NULL;
 609   _lower_high_boundary    = NULL;
 610   _middle_high_boundary   = NULL;
 611   _upper_high_boundary    = NULL;
 612   _lower_alignment        = 0;
 613   _middle_alignment       = 0;
 614   _upper_alignment        = 0;
 615   _special                = false;
 616   _executable             = false;
 617 }
 618 
 619 
 620 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
 621   const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
 622   return initialize_with_granularity(rs, committed_size, max_commit_granularity);
 623 }
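
// Typical usage (an illustrative sketch, mirroring the unit tests at the end of
// this file): reserve address space once, then commit and uncommit on demand.
//
//   ReservedSpace rs(reserved_bytes);        // suitably aligned size
//   VirtualSpace vs;
//   if (vs.initialize(rs, 0)) {              // nothing committed yet
//     vs.expand_by(committed_bytes, false);  // commit memory on demand
//     // ... use [vs.low(), vs.high()) ...
//     vs.release();                          // forgets the mapping; caller releases rs
//   }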
 624 
 625 bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
  if (!rs.is_reserved()) return false;  // allocation failed.
 627   assert(_low_boundary == NULL, "VirtualSpace already initialized");
 628   assert(max_commit_granularity > 0, "Granularity must be non-zero.");
 629 
 630   _low_boundary  = rs.base();
 631   _high_boundary = low_boundary() + rs.size();
 632 
 633   _low = low_boundary();
 634   _high = low();
 635 
 636   _special = rs.special();
 637   _executable = rs.executable();
 638 
 639   // When a VirtualSpace begins life at a large size, make all future expansion
 640   // and shrinking occur aligned to a granularity of large pages.  This avoids
 641   // fragmentation of physical addresses that inhibits the use of large pages
 642   // by the OS virtual memory system.  Empirically,  we see that with a 4MB
 643   // page size, the only spaces that get handled this way are codecache and
 644   // the heap itself, both of which provide a substantial performance
 645   // boost in many benchmarks when covered by large pages.
 646   //
 647   // No attempt is made to force large page alignment at the very top and
 648   // bottom of the space if they are not aligned so already.
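  //
  // The reserved range is split into three consecutive regions (an illustrative
  // layout; lower/upper may be empty when the boundaries happen to be aligned):
  //
  //   low_boundary()                                            high_boundary()
  //   |-- lower --|---------------- middle ----------------|----- upper -----|
  //    small pages        max_commit_granularity pages          small pages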
 649   _lower_alignment  = os::vm_page_size();
 650   _middle_alignment = max_commit_granularity;
 651   _upper_alignment  = os::vm_page_size();
 652 
 653   // End of each region
 654   _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
 655   _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
 656   _upper_high_boundary = high_boundary();
 657 
 658   // High address of each region
 659   _lower_high = low_boundary();
 660   _middle_high = lower_high_boundary();
 661   _upper_high = middle_high_boundary();
 662 
 663   // commit to initial size
 664   if (committed_size > 0) {
 665     if (!expand_by(committed_size)) {
 666       return false;
 667     }
 668   }
 669   return true;
 670 }
 671 
 672 
 673 VirtualSpace::~VirtualSpace() {
 674   release();
 675 }
 676 
 677 
 678 void VirtualSpace::release() {
  // This does not release the underlying reserved memory, since VirtualSpace
  // never reserved it.  The caller must release it via rs.release().
 681   _low_boundary           = NULL;
 682   _high_boundary          = NULL;
 683   _low                    = NULL;
 684   _high                   = NULL;
 685   _lower_high             = NULL;
 686   _middle_high            = NULL;
 687   _upper_high             = NULL;
 688   _lower_high_boundary    = NULL;
 689   _middle_high_boundary   = NULL;
 690   _upper_high_boundary    = NULL;
 691   _lower_alignment        = 0;
 692   _middle_alignment       = 0;
 693   _upper_alignment        = 0;
 694   _special                = false;
 695   _executable             = false;
 696 }
 697 
 698 
 699 size_t VirtualSpace::committed_size() const {
 700   return pointer_delta(high(), low(), sizeof(char));
 701 }
 702 
 703 
 704 size_t VirtualSpace::reserved_size() const {
 705   return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
 706 }
 707 
 708 
 709 size_t VirtualSpace::uncommitted_size()  const {
 710   return reserved_size() - committed_size();
 711 }
 712 
 713 size_t VirtualSpace::actual_committed_size() const {
 714   // Special VirtualSpaces commit all reserved space up front.
 715   if (special()) {
 716     return reserved_size();
 717   }
 718 
 719   size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
 720   size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
 721   size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));
 722 
 723 #ifdef ASSERT
 724   size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
 725   size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
 726   size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));
 727 
 728   if (committed_high > 0) {
 729     assert(committed_low == lower, "Must be");
 730     assert(committed_middle == middle, "Must be");
 731   }
 732 
 733   if (committed_middle > 0) {
 734     assert(committed_low == lower, "Must be");
 735   }
 736   if (committed_middle < middle) {
 737     assert(committed_high == 0, "Must be");
 738   }
 739 
 740   if (committed_low < lower) {
 741     assert(committed_high == 0, "Must be");
 742     assert(committed_middle == 0, "Must be");
 743   }
 744 #endif
 745 
 746   return committed_low + committed_middle + committed_high;
 747 }
 748 
 749 
 750 bool VirtualSpace::contains(const void* p) const {
 751   return low() <= (const char*) p && (const char*) p < high();
 752 }
 753 
/*
   First we need to determine whether a particular virtual space is using large
   pages.  This is done in the initialize function, and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages.  Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large-page-sized chunks.  If a particular request
   is within the current large page, the call to commit or uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading up to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
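//
// An illustrative example (assuming 4K small pages and a 2M commit granularity):
// for a space with low_boundary() == 0x10001000 and high_boundary() == 0x10400000,
//   lower region:  [0x10001000, 0x10200000)  committed with 4K pages
//   middle region: [0x10200000, 0x10400000)  committed with 2M pages
//   upper region:  [0x10400000, 0x10400000)  empty in this example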
 766 bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
 767   if (uncommitted_size() < bytes) return false;
 768 
 769   if (special()) {
 770     // don't commit memory if the entire space is pinned in memory
 771     _high += bytes;
 772     return true;
 773   }
 774 
 775   char* previous_high = high();
 776   char* unaligned_new_high = high() + bytes;
 777   assert(unaligned_new_high <= high_boundary(),
 778          "cannot expand by more than upper boundary");
 779 
 780   // Calculate where the new high for each of the regions should be.  If
 781   // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
 782   // then the unaligned lower and upper new highs would be the
 783   // lower_high() and upper_high() respectively.
 784   char* unaligned_lower_new_high =
 785     MIN2(unaligned_new_high, lower_high_boundary());
 786   char* unaligned_middle_new_high =
 787     MIN2(unaligned_new_high, middle_high_boundary());
 788   char* unaligned_upper_new_high =
 789     MIN2(unaligned_new_high, upper_high_boundary());
 790 
  // Align the new highs based on the regions' alignment.  lower and upper
  // alignment will always be the default page size.  middle alignment will be
  // LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
 795   char* aligned_lower_new_high =
 796     (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
 797   char* aligned_middle_new_high =
 798     (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
 799   char* aligned_upper_new_high =
 800     (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
 801 
 802   // Determine which regions need to grow in this expand_by call.
 803   // If you are growing in the lower region, high() must be in that
 804   // region so calculate the size based on high().  For the middle and
 805   // upper regions, determine the starting point of growth based on the
 806   // location of high().  By getting the MAX of the region's low address
 807   // (or the previous region's high address) and high(), we can tell if it
 808   // is an intra or inter region growth.
 809   size_t lower_needs = 0;
 810   if (aligned_lower_new_high > lower_high()) {
 811     lower_needs =
 812       pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
 813   }
 814   size_t middle_needs = 0;
 815   if (aligned_middle_new_high > middle_high()) {
 816     middle_needs =
 817       pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
 818   }
 819   size_t upper_needs = 0;
 820   if (aligned_upper_new_high > upper_high()) {
 821     upper_needs =
 822       pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
 823   }
 824 
 825   // Check contiguity.
 826   assert(low_boundary() <= lower_high() &&
 827          lower_high() <= lower_high_boundary(),
 828          "high address must be contained within the region");
 829   assert(lower_high_boundary() <= middle_high() &&
 830          middle_high() <= middle_high_boundary(),
 831          "high address must be contained within the region");
 832   assert(middle_high_boundary() <= upper_high() &&
 833          upper_high() <= upper_high_boundary(),
 834          "high address must be contained within the region");
 835 
 836   // Commit regions
 837   if (lower_needs > 0) {
 838     assert(low_boundary() <= lower_high() &&
 839            lower_high() + lower_needs <= lower_high_boundary(),
 840            "must not expand beyond region");
 841     if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
 842       debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
 843                          ", lower_needs=" SIZE_FORMAT ", %d) failed",
 844                          p2i(lower_high()), lower_needs, _executable);)
 845       return false;
 846     } else {
 847       _lower_high += lower_needs;
 848     }
 849   }
 850   if (middle_needs > 0) {
 851     assert(lower_high_boundary() <= middle_high() &&
 852            middle_high() + middle_needs <= middle_high_boundary(),
 853            "must not expand beyond region");
 854     if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
 855                            _executable)) {
 856       debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
 857                          ", middle_needs=" SIZE_FORMAT ", " SIZE_FORMAT
 858                          ", %d) failed", p2i(middle_high()), middle_needs,
 859                          middle_alignment(), _executable);)
 860       return false;
 861     }
 862     _middle_high += middle_needs;
 863   }
 864   if (upper_needs > 0) {
 865     assert(middle_high_boundary() <= upper_high() &&
 866            upper_high() + upper_needs <= upper_high_boundary(),
 867            "must not expand beyond region");
 868     if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
 869       debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
 870                          ", upper_needs=" SIZE_FORMAT ", %d) failed",
 871                          p2i(upper_high()), upper_needs, _executable);)
 872       return false;
 873     } else {
 874       _upper_high += upper_needs;
 875     }
 876   }
 877 
 878   if (pre_touch || AlwaysPreTouch) {
 879     os::pretouch_memory(previous_high, unaligned_new_high);
 880   }
 881 
 882   _high += bytes;
 883   return true;
 884 }
 885 
// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary, at
// which point that particular page can be uncommitted.
 889 void VirtualSpace::shrink_by(size_t size) {
 890   if (committed_size() < size)
 891     fatal("Cannot shrink virtual space to negative size");
 892 
 893   if (special()) {
 894     // don't uncommit if the entire space is pinned in memory
 895     _high -= size;
 896     return;
 897   }
 898 
 899   char* unaligned_new_high = high() - size;
 900   assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");
 901 
 902   // Calculate new unaligned address
 903   char* unaligned_upper_new_high =
 904     MAX2(unaligned_new_high, middle_high_boundary());
 905   char* unaligned_middle_new_high =
 906     MAX2(unaligned_new_high, lower_high_boundary());
 907   char* unaligned_lower_new_high =
 908     MAX2(unaligned_new_high, low_boundary());
 909 
 910   // Align address to region's alignment
 911   char* aligned_upper_new_high =
 912     (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
 913   char* aligned_middle_new_high =
 914     (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
 915   char* aligned_lower_new_high =
 916     (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
 917 
 918   // Determine which regions need to shrink
 919   size_t upper_needs = 0;
 920   if (aligned_upper_new_high < upper_high()) {
 921     upper_needs =
 922       pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
 923   }
 924   size_t middle_needs = 0;
 925   if (aligned_middle_new_high < middle_high()) {
 926     middle_needs =
 927       pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
 928   }
 929   size_t lower_needs = 0;
 930   if (aligned_lower_new_high < lower_high()) {
 931     lower_needs =
 932       pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
 933   }
 934 
 935   // Check contiguity.
 936   assert(middle_high_boundary() <= upper_high() &&
 937          upper_high() <= upper_high_boundary(),
 938          "high address must be contained within the region");
 939   assert(lower_high_boundary() <= middle_high() &&
 940          middle_high() <= middle_high_boundary(),
 941          "high address must be contained within the region");
 942   assert(low_boundary() <= lower_high() &&
 943          lower_high() <= lower_high_boundary(),
 944          "high address must be contained within the region");
 945 
 946   // Uncommit
 947   if (upper_needs > 0) {
 948     assert(middle_high_boundary() <= aligned_upper_new_high &&
 949            aligned_upper_new_high + upper_needs <= upper_high_boundary(),
 950            "must not shrink beyond region");
 951     if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
 952       debug_only(warning("os::uncommit_memory failed"));
 953       return;
 954     } else {
 955       _upper_high -= upper_needs;
 956     }
 957   }
 958   if (middle_needs > 0) {
 959     assert(lower_high_boundary() <= aligned_middle_new_high &&
 960            aligned_middle_new_high + middle_needs <= middle_high_boundary(),
 961            "must not shrink beyond region");
 962     if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
 963       debug_only(warning("os::uncommit_memory failed"));
 964       return;
 965     } else {
 966       _middle_high -= middle_needs;
 967     }
 968   }
 969   if (lower_needs > 0) {
 970     assert(low_boundary() <= aligned_lower_new_high &&
 971            aligned_lower_new_high + lower_needs <= lower_high_boundary(),
 972            "must not shrink beyond region");
 973     if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
 974       debug_only(warning("os::uncommit_memory failed"));
 975       return;
 976     } else {
 977       _lower_high -= lower_needs;
 978     }
 979   }
 980 
 981   _high -= size;
 982 }
 983 
 984 #ifndef PRODUCT
 985 void VirtualSpace::check_for_contiguity() {
 986   // Check contiguity.
 987   assert(low_boundary() <= lower_high() &&
 988          lower_high() <= lower_high_boundary(),
 989          "high address must be contained within the region");
 990   assert(lower_high_boundary() <= middle_high() &&
 991          middle_high() <= middle_high_boundary(),
 992          "high address must be contained within the region");
 993   assert(middle_high_boundary() <= upper_high() &&
 994          upper_high() <= upper_high_boundary(),
 995          "high address must be contained within the region");
 996   assert(low() >= low_boundary(), "low");
 997   assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
 998   assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
 999   assert(high() <= upper_high(), "upper high");
1000 }
1001 
1002 void VirtualSpace::print_on(outputStream* out) {
1003   out->print   ("Virtual space:");
1004   if (special()) out->print(" (pinned in memory)");
1005   out->cr();
1006   out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
1007   out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
1008   out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low()), p2i(high()));
1009   out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low_boundary()), p2i(high_boundary()));
1010 }
1011 
1012 void VirtualSpace::print() {
1013   print_on(tty);
1014 }
1015 
1016 /////////////// Unit tests ///////////////
1017 
1018 #ifndef PRODUCT
1019 
1020 #define test_log(...) \
1021   do {\
1022     if (VerboseInternalVMTests) { \
1023       tty->print_cr(__VA_ARGS__); \
1024       tty->flush(); \
1025     }\
1026   } while (false)
1027 
1028 class TestReservedSpace : AllStatic {
1029  public:
1030   static void small_page_write(void* addr, size_t size) {
1031     size_t page_size = os::vm_page_size();
1032 
1033     char* end = (char*)addr + size;
1034     for (char* p = (char*)addr; p < end; p += page_size) {
1035       *p = 1;
1036     }
1037   }
1038 
1039   static void release_memory_for_test(ReservedSpace rs) {
1040     if (rs.special()) {
1041       guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
1042     } else {
1043       guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
1044     }
1045   }
1046 
1047   static void test_reserved_space1(size_t size, size_t alignment) {
1048     test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
1049 
1050     assert(is_size_aligned(size, alignment), "Incorrect input parameters");
1051 
1052     ReservedSpace rs(size,          // size
1053                      alignment,     // alignment
1054                      UseLargePages, // large
1055                      (char *)NULL); // requested_address
1056 
1057     test_log(" rs.special() == %d", rs.special());
1058 
1059     assert(rs.base() != NULL, "Must be");
1060     assert(rs.size() == size, "Must be");
1061 
1062     assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
1063     assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
1064 
1065     if (rs.special()) {
1066       small_page_write(rs.base(), size);
1067     }
1068 
1069     release_memory_for_test(rs);
1070   }
1071 
1072   static void test_reserved_space2(size_t size) {
1073     test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
1074 
1075     assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1076 
1077     ReservedSpace rs(size);
1078 
1079     test_log(" rs.special() == %d", rs.special());
1080 
1081     assert(rs.base() != NULL, "Must be");
1082     assert(rs.size() == size, "Must be");
1083 
1084     if (rs.special()) {
1085       small_page_write(rs.base(), size);
1086     }
1087 
1088     release_memory_for_test(rs);
1089   }
1090 
1091   static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
1092     test_log("test_reserved_space3(%p, %p, %d)",
1093         (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);
1094 
1095     if (size < alignment) {
1096       // Tests might set -XX:LargePageSizeInBytes=<small pages> and cause unexpected input arguments for this test.
1097       assert((size_t)os::vm_page_size() == os::large_page_size(), "Test needs further refinement");
1098       return;
1099     }
1100 
1101     assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1102     assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");
1103 
1104     bool large = maybe_large && UseLargePages && size >= os::large_page_size();
1105 
1106     ReservedSpace rs(size, alignment, large, false);
1107 
1108     test_log(" rs.special() == %d", rs.special());
1109 
1110     assert(rs.base() != NULL, "Must be");
1111     assert(rs.size() == size, "Must be");
1112 
1113     if (rs.special()) {
1114       small_page_write(rs.base(), size);
1115     }
1116 
1117     release_memory_for_test(rs);
1118   }
1119 
1120 
1121   static void test_reserved_space1() {
1122     size_t size = 2 * 1024 * 1024;
1123     size_t ag   = os::vm_allocation_granularity();
1124 
1125     test_reserved_space1(size,      ag);
1126     test_reserved_space1(size * 2,  ag);
1127     test_reserved_space1(size * 10, ag);
1128   }
1129 
1130   static void test_reserved_space2() {
1131     size_t size = 2 * 1024 * 1024;
1132     size_t ag = os::vm_allocation_granularity();
1133 
1134     test_reserved_space2(size * 1);
1135     test_reserved_space2(size * 2);
1136     test_reserved_space2(size * 10);
1137     test_reserved_space2(ag);
1138     test_reserved_space2(size - ag);
1139     test_reserved_space2(size);
1140     test_reserved_space2(size + ag);
1141     test_reserved_space2(size * 2);
1142     test_reserved_space2(size * 2 - ag);
1143     test_reserved_space2(size * 2 + ag);
1144     test_reserved_space2(size * 3);
1145     test_reserved_space2(size * 3 - ag);
1146     test_reserved_space2(size * 3 + ag);
1147     test_reserved_space2(size * 10);
1148     test_reserved_space2(size * 10 + size / 2);
1149   }
1150 
1151   static void test_reserved_space3() {
1152     size_t ag = os::vm_allocation_granularity();
1153 
1154     test_reserved_space3(ag,      ag    , false);
1155     test_reserved_space3(ag * 2,  ag    , false);
1156     test_reserved_space3(ag * 3,  ag    , false);
1157     test_reserved_space3(ag * 2,  ag * 2, false);
1158     test_reserved_space3(ag * 4,  ag * 2, false);
1159     test_reserved_space3(ag * 8,  ag * 2, false);
1160     test_reserved_space3(ag * 4,  ag * 4, false);
1161     test_reserved_space3(ag * 8,  ag * 4, false);
1162     test_reserved_space3(ag * 16, ag * 4, false);
1163 
1164     if (UseLargePages) {
1165       size_t lp = os::large_page_size();
1166 
1167       // Without large pages
1168       test_reserved_space3(lp,     ag * 4, false);
1169       test_reserved_space3(lp * 2, ag * 4, false);
1170       test_reserved_space3(lp * 4, ag * 4, false);
1171       test_reserved_space3(lp,     lp    , false);
1172       test_reserved_space3(lp * 2, lp    , false);
1173       test_reserved_space3(lp * 3, lp    , false);
1174       test_reserved_space3(lp * 2, lp * 2, false);
1175       test_reserved_space3(lp * 4, lp * 2, false);
1176       test_reserved_space3(lp * 8, lp * 2, false);
1177 
1178       // With large pages
1179       test_reserved_space3(lp, ag * 4    , true);
1180       test_reserved_space3(lp * 2, ag * 4, true);
1181       test_reserved_space3(lp * 4, ag * 4, true);
1182       test_reserved_space3(lp, lp        , true);
1183       test_reserved_space3(lp * 2, lp    , true);
1184       test_reserved_space3(lp * 3, lp    , true);
1185       test_reserved_space3(lp * 2, lp * 2, true);
1186       test_reserved_space3(lp * 4, lp * 2, true);
1187       test_reserved_space3(lp * 8, lp * 2, true);
1188     }
1189   }
1190 
1191   static void test_reserved_space() {
1192     test_reserved_space1();
1193     test_reserved_space2();
1194     test_reserved_space3();
1195   }
1196 };
1197 
1198 void TestReservedSpace_test() {
1199   TestReservedSpace::test_reserved_space();
1200 }
1201 
1202 #define assert_equals(actual, expected)  \
1203   assert(actual == expected,             \
1204          "Got " SIZE_FORMAT " expected " \
1205          SIZE_FORMAT, actual, expected);
1206 
1207 #define assert_ge(value1, value2)                  \
1208   assert(value1 >= value2,                         \
1209          "'" #value1 "': " SIZE_FORMAT " '"        \
1210          #value2 "': " SIZE_FORMAT, value1, value2);
1211 
1212 #define assert_lt(value1, value2)                  \
1213   assert(value1 < value2,                          \
1214          "'" #value1 "': " SIZE_FORMAT " '"        \
1215          #value2 "': " SIZE_FORMAT, value1, value2);
1216 
1217 
1218 class TestVirtualSpace : AllStatic {
1219   enum TestLargePages {
1220     Default,
1221     Disable,
1222     Reserve,
1223     Commit
1224   };
1225 
1226   static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
1227     switch(mode) {
1228     default:
1229     case Default:
1230     case Reserve:
1231       return ReservedSpace(reserve_size_aligned);
1232     case Disable:
1233     case Commit:
1234       return ReservedSpace(reserve_size_aligned,
1235                            os::vm_allocation_granularity(),
1236                            /* large */ false, /* exec */ false);
1237     }
1238   }
1239 
1240   static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
1241     switch(mode) {
1242     default:
1243     case Default:
1244     case Reserve:
1245       return vs.initialize(rs, 0);
1246     case Disable:
1247       return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
1248     case Commit:
1249       return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
1250     }
1251   }
1252 
1253  public:
1254   static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
1255                                                         TestLargePages mode = Default) {
1256     size_t granularity = os::vm_allocation_granularity();
1257     size_t reserve_size_aligned = align_size_up(reserve_size, granularity);
1258 
1259     ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
1260 
1261     assert(reserved.is_reserved(), "Must be");
1262 
1263     VirtualSpace vs;
1264     bool initialized = initialize_virtual_space(vs, reserved, mode);
1265     assert(initialized, "Failed to initialize VirtualSpace");
1266 
1267     vs.expand_by(commit_size, false);
1268 
1269     if (vs.special()) {
1270       assert_equals(vs.actual_committed_size(), reserve_size_aligned);
1271     } else {
1272       assert_ge(vs.actual_committed_size(), commit_size);
      // Approximate the commit granularity.
      // Make sure that we don't commit using large pages
      // if large pages have been disabled for this VirtualSpace.
1276       size_t commit_granularity = (mode == Disable || !UseLargePages) ?
1277                                    os::vm_page_size() : os::large_page_size();
1278       assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
1279     }
1280 
1281     reserved.release();
1282   }
1283 
1284   static void test_virtual_space_actual_committed_space_one_large_page() {
1285     if (!UseLargePages) {
1286       return;
1287     }
1288 
1289     size_t large_page_size = os::large_page_size();
1290 
1291     ReservedSpace reserved(large_page_size, large_page_size, true, false);
1292 
1293     assert(reserved.is_reserved(), "Must be");
1294 
1295     VirtualSpace vs;
1296     bool initialized = vs.initialize(reserved, 0);
1297     assert(initialized, "Failed to initialize VirtualSpace");
1298 
1299     vs.expand_by(large_page_size, false);
1300 
1301     assert_equals(vs.actual_committed_size(), large_page_size);
1302 
1303     reserved.release();
1304   }
1305 
1306   static void test_virtual_space_actual_committed_space() {
1307     test_virtual_space_actual_committed_space(4 * K, 0);
1308     test_virtual_space_actual_committed_space(4 * K, 4 * K);
1309     test_virtual_space_actual_committed_space(8 * K, 0);
1310     test_virtual_space_actual_committed_space(8 * K, 4 * K);
1311     test_virtual_space_actual_committed_space(8 * K, 8 * K);
1312     test_virtual_space_actual_committed_space(12 * K, 0);
1313     test_virtual_space_actual_committed_space(12 * K, 4 * K);
1314     test_virtual_space_actual_committed_space(12 * K, 8 * K);
1315     test_virtual_space_actual_committed_space(12 * K, 12 * K);
1316     test_virtual_space_actual_committed_space(64 * K, 0);
1317     test_virtual_space_actual_committed_space(64 * K, 32 * K);
1318     test_virtual_space_actual_committed_space(64 * K, 64 * K);
1319     test_virtual_space_actual_committed_space(2 * M, 0);
1320     test_virtual_space_actual_committed_space(2 * M, 4 * K);
1321     test_virtual_space_actual_committed_space(2 * M, 64 * K);
1322     test_virtual_space_actual_committed_space(2 * M, 1 * M);
1323     test_virtual_space_actual_committed_space(2 * M, 2 * M);
1324     test_virtual_space_actual_committed_space(10 * M, 0);
1325     test_virtual_space_actual_committed_space(10 * M, 4 * K);
1326     test_virtual_space_actual_committed_space(10 * M, 8 * K);
1327     test_virtual_space_actual_committed_space(10 * M, 1 * M);
1328     test_virtual_space_actual_committed_space(10 * M, 2 * M);
1329     test_virtual_space_actual_committed_space(10 * M, 5 * M);
1330     test_virtual_space_actual_committed_space(10 * M, 10 * M);
1331   }
1332 
1333   static void test_virtual_space_disable_large_pages() {
1334     if (!UseLargePages) {
1335       return;
1336     }
    // These test cases verify that if we force VirtualSpace to disable large
    // pages, committing is done with small-page granularity.
1338     test_virtual_space_actual_committed_space(10 * M, 0, Disable);
1339     test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
1340     test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
1341     test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
1342     test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
1343     test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
1344     test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);
1345 
1346     test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
1347     test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
1348     test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
1349     test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
1350     test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
1351     test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
1352     test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);
1353 
1354     test_virtual_space_actual_committed_space(10 * M, 0, Commit);
1355     test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
1356     test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
1357     test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
1358     test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
1359     test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
1360     test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
1361   }
1362 
1363   static void test_virtual_space() {
1364     test_virtual_space_actual_committed_space();
1365     test_virtual_space_actual_committed_space_one_large_page();
1366     test_virtual_space_disable_large_pages();
1367   }
1368 };
1369 
1370 void TestVirtualSpace_test() {
1371   TestVirtualSpace::test_virtual_space();
1372 }
1373 
1374 #endif // PRODUCT
1375 
1376 #endif