1 /*
   2  * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "code/codeCacheExtensions.hpp"
  27 #include "logging/log.hpp"
  28 #include "memory/resourceArea.hpp"
  29 #include "memory/virtualspace.hpp"
  30 #include "oops/markOop.hpp"
  31 #include "oops/oop.inline.hpp"
  32 #include "services/memTracker.hpp"
  33 
  34 // ReservedSpace
  35 
  36 // Dummy constructor
  37 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
  38     _alignment(0), _special(false), _executable(false) {
  39 }
  40 
  41 ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) {
  42   bool has_preferred_page_size = preferred_page_size != 0;
  43   // Want to use large pages where possible and pad with small pages.
  44   size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
  45   bool large_pages = page_size != (size_t)os::vm_page_size();
  46   size_t alignment;
  47   if (large_pages && has_preferred_page_size) {
  48     alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
  49     // ReservedSpace initialization requires size to be aligned to the given
  50     // alignment. Align the size up.
  51     size = align_size_up(size, alignment);
  52   } else {
  53     // Don't force the alignment to be large page aligned,
  54     // since that will waste memory.
  55     alignment = os::vm_allocation_granularity();
  56   }
  57   initialize(size, alignment, large_pages, NULL, false);
  58 }
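
// For example (illustrative numbers, assuming a 4K allocation granularity):
// asking for 9M with a preferred page size of 2M rounds the size up to 10M and
// reserves with 2M alignment, while a preferred_page_size of 0 lets
// os::page_size_for_region_unaligned() pick the page size and keeps the
// default allocation granularity as the alignment.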
  59 
  60 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  61                              bool large,
  62                              char* requested_address) {
  63   initialize(size, alignment, large, requested_address, false);
  64 }
  65 
  66 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  67                              bool large,
  68                              bool executable) {
  69   initialize(size, alignment, large, NULL, executable);
  70 }
  71 
  72 // Helper method.
  73 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
  74                                            const size_t size, bool special)
  75 {
  76   if (base == requested_address || requested_address == NULL)
  77     return false; // did not fail
  78 
  79   if (base != NULL) {
    // A different reserve address may be acceptable in other cases,
    // but for compressed oops the heap should be at the requested address.
  82     assert(UseCompressedOops, "currently requested address used only for compressed oops");
  83     log_debug(gc, heap, coops)("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address));
  84     // OS ignored requested address. Try different address.
  85     if (special) {
  86       if (!os::release_memory_special(base, size)) {
  87         fatal("os::release_memory_special failed");
  88       }
  89     } else {
  90       if (!os::release_memory(base, size)) {
  91         fatal("os::release_memory failed");
  92       }
  93     }
  94   }
  95   return true;
  96 }
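
// Usage note (a sketch of the calling convention): callers that pass a
// non-NULL requested_address check the return value; 'true' means the
// reservation landed somewhere else and has already been released, so the
// caller can retry at a different address or leave the space unreserved.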
  97 
  98 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
  99                                char* requested_address,
 100                                bool executable) {
 101   const size_t granularity = os::vm_allocation_granularity();
 102   assert((size & (granularity - 1)) == 0,
 103          "size not aligned to os::vm_allocation_granularity()");
 104   assert((alignment & (granularity - 1)) == 0,
 105          "alignment not aligned to os::vm_allocation_granularity()");
 106   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 107          "not a power of 2");
 108 
 109   alignment = MAX2(alignment, (size_t)os::vm_page_size());
 110 
 111   _base = NULL;
 112   _size = 0;
 113   _special = false;
 114   _executable = executable;
 115   _alignment = 0;
 116   _noaccess_prefix = 0;
 117   if (size == 0) {
 118     return;
 119   }
 120 
  // If the OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  // If there is a backing file directory for this VirtualSpace, then whether
  // large pages are allocated is up to the filesystem the directory resides in,
  // so we ignore the UseLargePages flag in this case.
  bool special = (_backingFileDir == NULL) && (large && !os::can_commit_large_page_memory());
 126   char* base = NULL;
 127 
 128   if (special) {
 129 
 130     base = os::reserve_memory_special(size, alignment, requested_address, executable);
 131 
 132     if (base != NULL) {
 133       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
 134         // OS ignored requested address. Try different address.
 135         return;
 136       }
 137       // Check alignment constraints.
 138       assert((uintptr_t) base % alignment == 0,
 139              "Large pages returned a non-aligned address, base: "
 140              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 141              p2i(base), alignment);
 142       _special = true;
 143     } else {
 144       // failed; try to reserve regular memory below
 145       if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 146                             !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 147         log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 148       }
 149     }
 150   }
 151 
 152   if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid overmapping something
    // important.  If available space is not detected, return NULL.
 160 
 161     if (requested_address != 0) {
 162       base = os::attempt_reserve_memory_at(size, requested_address);
 163       if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
 164         // OS ignored requested address. Try different address.
 165         base = NULL;
 166       }
 167     } else {
 168       base = os::reserve_memory(size, NULL, alignment);
 169     }
 170 
 171     if (base == NULL) return;
 172 
 173     // Check alignment constraints
 174     if ((((size_t)base) & (alignment - 1)) != 0) {
 175       // Base not aligned, retry
 176       if (!os::release_memory(base, size)) fatal("os::release_memory failed");
 177       // Make sure that size is aligned
 178       size = align_size_up(size, alignment);
 179       base = os::reserve_memory_aligned(size, alignment);
 180 
 181       if (requested_address != 0 &&
 182           failed_to_reserve_as_requested(base, requested_address, size, false)) {
 183         // As a result of the alignment constraints, the allocated base differs
 184         // from the requested address. Return back to the caller who can
 185         // take remedial action (like try again without a requested address).
 186         assert(_base == NULL, "should be");
 187         return;
 188       }
 189     }
 190   }
 191   // Done
 192   _base = base;
 193   _size = size;
 194   _alignment = alignment;
 195 
  if (_backingFileDir != NULL) {
    // At this point a virtual address range is reserved; now map this memory to a file.
    os::map_memory_to_file(base, size, _backingFileDir);
    // Mark this virtual space as _special because the physical memory is committed.
    _special = true;
  }
 202 }
 203 
 204 
 205 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
 206                              bool special, bool executable) {
 207   assert((size % os::vm_allocation_granularity()) == 0,
 208          "size not allocation aligned");
 209   _base = base;
 210   _size = size;
 211   _alignment = alignment;
 212   _noaccess_prefix = 0;
 213   _special = special;
 214   _executable = executable;
 215 }
 216 
 217 
 218 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
 219                                         bool split, bool realloc) {
 220   assert(partition_size <= size(), "partition failed");
 221   if (split) {
 222     os::split_reserved_memory(base(), size(), partition_size, realloc);
 223   }
 224   ReservedSpace result(base(), partition_size, alignment, special(),
 225                        executable());
 226   return result;
 227 }
 228 
 229 
 230 ReservedSpace
 231 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
 232   assert(partition_size <= size(), "partition failed");
 233   ReservedSpace result(base() + partition_size, size() - partition_size,
 234                        alignment, special(), executable());
 235   return result;
 236 }
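
// For example (illustrative only): given a 100M ReservedSpace, a first_part()
// of 40M describes its first 40M and the matching last_part() the remaining
// 60M; with split == true the underlying OS reservation is split as well
// (where the OS requires it), so the two parts can later be released
// independently.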
 237 
 238 
 239 size_t ReservedSpace::page_align_size_up(size_t size) {
 240   return align_size_up(size, os::vm_page_size());
 241 }
 242 
 243 
 244 size_t ReservedSpace::page_align_size_down(size_t size) {
 245   return align_size_down(size, os::vm_page_size());
 246 }
 247 
 248 
 249 size_t ReservedSpace::allocation_align_size_up(size_t size) {
 250   return align_size_up(size, os::vm_allocation_granularity());
 251 }
 252 
 253 
 254 size_t ReservedSpace::allocation_align_size_down(size_t size) {
 255   return align_size_down(size, os::vm_allocation_granularity());
 256 }
 257 
 258 
 259 void ReservedSpace::release() {
 260   if (is_reserved()) {
 261     char *real_base = _base - _noaccess_prefix;
 262     const size_t real_size = _size + _noaccess_prefix;
 263     if (special()) {
 264       os::release_memory_special(real_base, real_size);
    } else {
 266       os::release_memory(real_base, real_size);
 267     }
 268     _base = NULL;
 269     _size = 0;
 270     _noaccess_prefix = 0;
 271     _alignment = 0;
 272     _special = false;
 273     _executable = false;
 274   }
 275 }
 276 
 277 static size_t noaccess_prefix_size(size_t alignment) {
 278   return lcm(os::vm_page_size(), alignment);
 279 }
 280 
 281 void ReservedHeapSpace::establish_noaccess_prefix() {
 282   assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
 283   _noaccess_prefix = noaccess_prefix_size(_alignment);
 284 
 285   if (base() && base() + _size > (char *)OopEncodingHeapMax) {
 286     if (true
 287         WIN64_ONLY(&& !UseLargePages)
 288         AIX_ONLY(&& os::vm_page_size() != SIZE_64K)) {
 289       // Protect memory at the base of the allocated region.
      // If special, the page was committed (only matters on Windows).
 291       if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
 292         fatal("cannot protect protection page");
 293       }
 294       log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
 295                                  PTR_FORMAT " / " INTX_FORMAT " bytes",
 296                                  p2i(_base),
 297                                  _noaccess_prefix);
 298       assert(Universe::narrow_oop_use_implicit_null_checks() == true, "not initialized?");
 299     } else {
 300       Universe::set_narrow_oop_use_implicit_null_checks(false);
 301     }
 302   }
 303 
 304   _base += _noaccess_prefix;
 305   _size -= _noaccess_prefix;
 306   assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
 307 }
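
// A sketch of the layout after establish_noaccess_prefix() (not to scale):
//
//   reserved base                      _base (after this call)
//   |<------ _noaccess_prefix ------>|<-------------- _size -------------->|
//   |  protected (MEM_PROT_NONE)     |  usable heap                        |
//
// Decoding a compressed oop of 0 then yields an address inside the protected
// prefix and faults, which is what allows implicit null checks with a
// non-zero heap base.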
 308 
// Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
// Does not check whether the reserved memory actually is at requested_address, as the memory returned
// might still fulfill the wishes of the caller.
// Assures the memory is aligned to 'alignment'.
// NOTE: If ReservedHeapSpace already points to some reserved memory, it is freed first.
 314 void ReservedHeapSpace::try_reserve_heap(size_t size,
 315                                          size_t alignment,
 316                                          bool large,
 317                                          char* requested_address) {
 318   if (_base != NULL) {
 319     // We tried before, but we didn't like the address delivered.
 320     release();
 321   }
 322 
  // If the OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  // If there is a backing file directory for this VirtualSpace, then whether
  // large pages are allocated is up to the filesystem the directory resides in,
  // so we ignore the UseLargePages flag in this case.
  bool special = (_backingFileDir == NULL) && (large && !os::can_commit_large_page_memory());
 328   char* base = NULL;
 329 
 330   log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
 331                              " heap of size " SIZE_FORMAT_HEX,
 332                              p2i(requested_address),
 333                              size);
 334 
 335   if (special) {
 336     base = os::reserve_memory_special(size, alignment, requested_address, false);
 337 
 338     if (base != NULL) {
 339       // Check alignment constraints.
 340       assert((uintptr_t) base % alignment == 0,
 341              "Large pages returned a non-aligned address, base: "
 342              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 343              p2i(base), alignment);
 344       _special = true;
 345     }
 346   }
 347 
 348   if (base == NULL) {
 349     // Failed; try to reserve regular memory below
 350     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 351                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 352       log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 353     }
 354 
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid overmapping something
    // important.  If available space is not detected, return NULL.
 362 
 363     if (requested_address != 0) {
 364       base = os::attempt_reserve_memory_at(size, requested_address);
 365     } else {
 366       base = os::reserve_memory(size, NULL, alignment);
 367     }
 368   }
 369   if (base == NULL) { return; }
 370 
 371   // Done
 372   _base = base;
 373   _size = size;
 374   _alignment = alignment;
 375 
 376   // Check alignment constraints
 377   if ((((size_t)base) & (alignment - 1)) != 0) {
 378     // Base not aligned, retry.
 379     release();
 380     return;
 381   }
  if (_backingFileDir != NULL) {
    // At this point a virtual address range is reserved; now map this memory to a file.
    os::map_memory_to_file(base, size, _backingFileDir);
    // Mark this virtual space as _special because the physical memory is committed.
    _special = true;
  }
 388 }
 389 
 390 void ReservedHeapSpace::try_reserve_range(char *highest_start,
 391                                           char *lowest_start,
 392                                           size_t attach_point_alignment,
 393                                           char *aligned_heap_base_min_address,
 394                                           char *upper_bound,
 395                                           size_t size,
 396                                           size_t alignment,
 397                                           bool large) {
 398   const size_t attach_range = highest_start - lowest_start;
  // Cap the number of attempts at the number of possible attach points.
  // At least one attempt is possible even for a zero-sized attach range.
 401   const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
 402   const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);
 403 
 404   const size_t stepsize = (attach_range == 0) ? // Only one try.
 405     (size_t) highest_start : align_size_up(attach_range / num_attempts_to_try, attach_point_alignment);
 406 
 407   // Try attach points from top to bottom.
 408   char* attach_point = highest_start;
 409   while (attach_point >= lowest_start  &&
 410          attach_point <= highest_start &&  // Avoid wrap around.
 411          ((_base == NULL) ||
 412           (_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
 413     try_reserve_heap(size, alignment, large, attach_point);
 414     attach_point -= stepsize;
 415   }
 416 }
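
// For example (illustrative numbers only): with lowest_start == 1*G,
// highest_start == 4*G, attach_point_alignment == 1*G and HeapSearchSteps >= 3,
// stepsize becomes 1*G and the attach points tried are 4*G, 3*G, 2*G and 1*G,
// from top to bottom, until one try delivers a base inside
// [aligned_heap_base_min_address, upper_bound - size].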
 417 
 418 #define SIZE_64K  ((uint64_t) UCONST64(      0x10000))
 419 #define SIZE_256M ((uint64_t) UCONST64(   0x10000000))
 420 #define SIZE_32G  ((uint64_t) UCONST64(  0x800000000))
 421 
 422 // Helper for heap allocation. Returns an array with addresses
 423 // (OS-specific) which are suited for disjoint base mode. Array is
 424 // NULL terminated.
 425 static char** get_attach_addresses_for_disjoint_mode() {
 426   static uint64_t addresses[] = {
 427      2 * SIZE_32G,
 428      3 * SIZE_32G,
 429      4 * SIZE_32G,
 430      8 * SIZE_32G,
 431     10 * SIZE_32G,
 432      1 * SIZE_64K * SIZE_32G,
 433      2 * SIZE_64K * SIZE_32G,
 434      3 * SIZE_64K * SIZE_32G,
 435      4 * SIZE_64K * SIZE_32G,
 436     16 * SIZE_64K * SIZE_32G,
 437     32 * SIZE_64K * SIZE_32G,
 438     34 * SIZE_64K * SIZE_32G,
 439     0
 440   };
 441 
  // Skip addresses that are below OopEncodingHeapMax or below
  // HeapBaseMinAddress. This assumes the array is sorted.
 444   uint i = 0;
 445   while (addresses[i] != 0 &&
 446          (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
 447     i++;
 448   }
 449   uint start = i;
 450 
 451   // Avoid more steps than requested.
 452   i = 0;
 453   while (addresses[start+i] != 0) {
 454     if (i == HeapSearchSteps) {
 455       addresses[start+i] = 0;
 456       break;
 457     }
 458     i++;
 459   }
 460 
 461   return (char**) &addresses[start];
 462 }
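
// Note: the addresses above are all multiples of SIZE_32G, i.e. of
// OopEncodingHeapMax with the default 8-byte object alignment, which is what
// makes them candidate bases for disjoint base mode.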
 463 
 464 void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, bool large) {
 465   guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
 466             "can not allocate compressed oop heap for this size");
 467   guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
 468   assert(HeapBaseMinAddress > 0, "sanity");
 469 
 470   const size_t granularity = os::vm_allocation_granularity();
 471   assert((size & (granularity - 1)) == 0,
 472          "size not aligned to os::vm_allocation_granularity()");
 473   assert((alignment & (granularity - 1)) == 0,
 474          "alignment not aligned to os::vm_allocation_granularity()");
 475   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 476          "not a power of 2");
 477 
 478   // The necessary attach point alignment for generated wish addresses.
 479   // This is needed to increase the chance of attaching for mmap and shmat.
 480   const size_t os_attach_point_alignment =
 481     AIX_ONLY(SIZE_256M)  // Known shm boundary alignment.
 482     NOT_AIX(os::vm_allocation_granularity());
 483   const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
 484 
 485   char *aligned_heap_base_min_address = (char *)align_ptr_up((void *)HeapBaseMinAddress, alignment);
 486   size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
 487     noaccess_prefix_size(alignment) : 0;
 488 
 489   // Attempt to alloc at user-given address.
 490   if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
 491     try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_heap_base_min_address);
 492     if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
 493       release();
 494     }
 495   }
 496 
 497   // Keep heap at HeapBaseMinAddress.
 498   if (_base == NULL) {
 499 
 500     // Try to allocate the heap at addresses that allow efficient oop compression.
 501     // Different schemes are tried, in order of decreasing optimization potential.
 502     //
 503     // For this, try_reserve_heap() is called with the desired heap base addresses.
 504     // A call into the os layer to allocate at a given address can return memory
 505     // at a different address than requested.  Still, this might be memory at a useful
    // address. try_reserve_heap() always keeps this allocated memory, as only
    // here are the criteria for a good heap checked.
 508 
 509     // Attempt to allocate so that we can run without base and scale (32-Bit unscaled compressed oops).
 510     // Give it several tries from top of range to bottom.
 511     if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {
 512 
      // Calculate the address range within which we try to attach (range of possible start addresses).
 514       char* const highest_start = (char *)align_ptr_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
 515       char* const lowest_start  = (char *)align_ptr_up(aligned_heap_base_min_address, attach_point_alignment);
 516       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 517                         aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
 518     }
 519 
 520     // zerobased: Attempt to allocate in the lower 32G.
 521     // But leave room for the compressed class pointers, which is allocated above
 522     // the heap.
 523     char *zerobased_max = (char *)OopEncodingHeapMax;
 524     const size_t class_space = align_size_up(CompressedClassSpaceSize, alignment);
 525     // For small heaps, save some space for compressed class pointer
 526     // space so it can be decoded with no base.
 527     if (UseCompressedClassPointers && !UseSharedSpaces &&
 528         OopEncodingHeapMax <= KlassEncodingMetaspaceMax &&
 529         (uint64_t)(aligned_heap_base_min_address + size + class_space) <= KlassEncodingMetaspaceMax) {
 530       zerobased_max = (char *)OopEncodingHeapMax - class_space;
 531     }
 532 
 533     // Give it several tries from top of range to bottom.
    if (aligned_heap_base_min_address + size <= zerobased_max &&    // Zerobased theoretically possible.
 535         ((_base == NULL) ||                        // No previous try succeeded.
 536          (_base + size > zerobased_max))) {        // Unscaled delivered an arbitrary address.
 537 
      // Calculate the address range within which we try to attach (range of possible start addresses).
 539       char *const highest_start = (char *)align_ptr_down(zerobased_max - size, attach_point_alignment);
 540       // Need to be careful about size being guaranteed to be less
 541       // than UnscaledOopHeapMax due to type constraints.
 542       char *lowest_start = aligned_heap_base_min_address;
 543       uint64_t unscaled_end = UnscaledOopHeapMax - size;
 544       if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
 545         lowest_start = MAX2(lowest_start, (char*)unscaled_end);
 546       }
 547       lowest_start  = (char *)align_ptr_up(lowest_start, attach_point_alignment);
 548       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 549                         aligned_heap_base_min_address, zerobased_max, size, alignment, large);
 550     }
 551 
 552     // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
 553     // implement null checks.
 554     noaccess_prefix = noaccess_prefix_size(alignment);
 555 
    // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjoint base mode.
 557     char** addresses = get_attach_addresses_for_disjoint_mode();
 558     int i = 0;
 559     while (addresses[i] &&                                 // End of array not yet reached.
 560            ((_base == NULL) ||                             // No previous try succeeded.
 561             (_base + size >  (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
 562              !Universe::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
 563       char* const attach_point = addresses[i];
 564       assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
 565       try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
 566       i++;
 567     }
 568 
 569     // Last, desperate try without any placement.
 570     if (_base == NULL) {
 571       log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix);
 572       initialize(size + noaccess_prefix, alignment, large, NULL, false);
 573     }
 574   }
 575 }
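
// In summary, initialize_compressed_heap() tries, in order of decreasing
// benefit: the user-given HeapBaseMinAddress, an unscaled heap (heap end below
// UnscaledOopHeapMax, 4G by default), a zerobased heap (heap end below
// OopEncodingHeapMax, 32G with 8-byte object alignment), attach points suited
// for disjoint base mode, and finally an arbitrary address with a noaccess
// prefix.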
 576 
 577 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* backingFSforHeap) : ReservedSpace() {
 578 
 579   if (size == 0) {
 580     return;
 581   }
 582 
  _backingFileDir = backingFSforHeap;
 584   // Heap size should be aligned to alignment, too.
 585   guarantee(is_size_aligned(size, alignment), "set by caller");
 586 
 587   if (UseCompressedOops) {
 588     initialize_compressed_heap(size, alignment, large);
 589     if (_size > size) {
 590       // We allocated heap with noaccess prefix.
 591       // It can happen we get a zerobased/unscaled heap with noaccess prefix,
 592       // if we had to try at arbitrary address.
 593       establish_noaccess_prefix();
 594     }
 595   } else {
 596     initialize(size, alignment, large, NULL, false);
 597   }
 598 
 599   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
 600          "area must be distinguishable from marks for mark-sweep");
 601   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
 602          "area must be distinguishable from marks for mark-sweep");
 603 
 604   if (base() > 0) {
 605     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
 606   }
 607 }
 608 
 609 // Reserve space for code segment.  Same as Java heap only we mark this as
 610 // executable.
 611 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
 612                                      size_t rs_align,
 613                                      bool large) :
 614   ReservedSpace(r_size, rs_align, large, /*executable*/ CodeCacheExtensions::support_dynamic_code()) {
 615   MemTracker::record_virtual_memory_type((address)base(), mtCode);
 616 }
 617 
 618 // VirtualSpace
 619 
 620 VirtualSpace::VirtualSpace() {
 621   _low_boundary           = NULL;
 622   _high_boundary          = NULL;
 623   _low                    = NULL;
 624   _high                   = NULL;
 625   _lower_high             = NULL;
 626   _middle_high            = NULL;
 627   _upper_high             = NULL;
 628   _lower_high_boundary    = NULL;
 629   _middle_high_boundary   = NULL;
 630   _upper_high_boundary    = NULL;
 631   _lower_alignment        = 0;
 632   _middle_alignment       = 0;
 633   _upper_alignment        = 0;
 634   _special                = false;
 635   _executable             = false;
 636 }
 637 
 638 
 639 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
 640   const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
 641   return initialize_with_granularity(rs, committed_size, max_commit_granularity);
 642 }
 643 
 644 bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
  if (!rs.is_reserved()) return false;  // Allocation failed.
 646   assert(_low_boundary == NULL, "VirtualSpace already initialized");
 647   assert(max_commit_granularity > 0, "Granularity must be non-zero.");
 648 
 649   _low_boundary  = rs.base();
 650   _high_boundary = low_boundary() + rs.size();
 651 
 652   _low = low_boundary();
 653   _high = low();
 654 
 655   _special = rs.special();
 656   _executable = rs.executable();
 657 
 658   // When a VirtualSpace begins life at a large size, make all future expansion
 659   // and shrinking occur aligned to a granularity of large pages.  This avoids
 660   // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
 662   // page size, the only spaces that get handled this way are codecache and
 663   // the heap itself, both of which provide a substantial performance
 664   // boost in many benchmarks when covered by large pages.
 665   //
 666   // No attempt is made to force large page alignment at the very top and
 667   // bottom of the space if they are not aligned so already.
 668   _lower_alignment  = os::vm_page_size();
 669   _middle_alignment = max_commit_granularity;
 670   _upper_alignment  = os::vm_page_size();
 671 
 672   // End of each region
 673   _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
 674   _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
 675   _upper_high_boundary = high_boundary();
 676 
 677   // High address of each region
 678   _lower_high = low_boundary();
 679   _middle_high = lower_high_boundary();
 680   _upper_high = middle_high_boundary();
 681 
 682   // commit to initial size
 683   if (committed_size > 0) {
 684     if (!expand_by(committed_size)) {
 685       return false;
 686     }
 687   }
 688   return true;
 689 }
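
// A sketch of the resulting region layout (not to scale):
//
//   low_boundary()                                           high_boundary()
//   |-- lower --|------------------ middle -----------------|-- upper --|
//    page-sized      committed in max_commit_granularity      page-sized
//    chunks          (e.g. large page) chunks                 chunks
//
// low()/high() delimit the committed part as a whole, while _lower_high,
// _middle_high and _upper_high track how far each region has been committed.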
 690 
 691 
 692 VirtualSpace::~VirtualSpace() {
 693   release();
 694 }
 695 
 696 
 697 void VirtualSpace::release() {
 698   // This does not release memory it never reserved.
 699   // Caller must release via rs.release();
 700   _low_boundary           = NULL;
 701   _high_boundary          = NULL;
 702   _low                    = NULL;
 703   _high                   = NULL;
 704   _lower_high             = NULL;
 705   _middle_high            = NULL;
 706   _upper_high             = NULL;
 707   _lower_high_boundary    = NULL;
 708   _middle_high_boundary   = NULL;
 709   _upper_high_boundary    = NULL;
 710   _lower_alignment        = 0;
 711   _middle_alignment       = 0;
 712   _upper_alignment        = 0;
 713   _special                = false;
 714   _executable             = false;
 715 }
 716 
 717 
 718 size_t VirtualSpace::committed_size() const {
 719   return pointer_delta(high(), low(), sizeof(char));
 720 }
 721 
 722 
 723 size_t VirtualSpace::reserved_size() const {
 724   return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
 725 }
 726 
 727 
 728 size_t VirtualSpace::uncommitted_size()  const {
 729   return reserved_size() - committed_size();
 730 }
 731 
 732 size_t VirtualSpace::actual_committed_size() const {
 733   // Special VirtualSpaces commit all reserved space up front.
 734   if (special()) {
 735     return reserved_size();
 736   }
 737 
 738   size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
 739   size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
 740   size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));
 741 
 742 #ifdef ASSERT
 743   size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
 744   size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
 745   size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));
 746 
 747   if (committed_high > 0) {
 748     assert(committed_low == lower, "Must be");
 749     assert(committed_middle == middle, "Must be");
 750   }
 751 
 752   if (committed_middle > 0) {
 753     assert(committed_low == lower, "Must be");
 754   }
 755   if (committed_middle < middle) {
 756     assert(committed_high == 0, "Must be");
 757   }
 758 
 759   if (committed_low < lower) {
 760     assert(committed_high == 0, "Must be");
 761     assert(committed_middle == 0, "Must be");
 762   }
 763 #endif
 764 
 765   return committed_low + committed_middle + committed_high;
 766 }
 767 
 768 
 769 bool VirtualSpace::contains(const void* p) const {
 770   return low() <= (const char*) p && (const char*) p < high();
 771 }
 772 
 773 /*
 774    First we need to determine if a particular virtual space is using large
 775    pages.  This is done at the initialize function and only virtual spaces
 776    that are larger than LargePageSizeInBytes use large pages.  Once we
 777    have determined this, all expand_by and shrink_by calls must grow and
 778    shrink by large page size chunks.  If a particular request
 779    is within the current large page, the call to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading up to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
 784 */
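// For example (a sketch, assuming a 4K small page and a 2M middle alignment):
// as the space grows, expansion commits 4K pages up to lower_high_boundary(),
// then 2M chunks through the middle region, and finally 4K pages in the upper
// region up to high_boundary().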
 785 bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
 786   if (uncommitted_size() < bytes) return false;
 787 
 788   if (special()) {
 789     // don't commit memory if the entire space is pinned in memory
 790     _high += bytes;
 791     return true;
 792   }
 793 
 794   char* previous_high = high();
 795   char* unaligned_new_high = high() + bytes;
 796   assert(unaligned_new_high <= high_boundary(),
 797          "cannot expand by more than upper boundary");
 798 
 799   // Calculate where the new high for each of the regions should be.  If
 800   // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
 801   // then the unaligned lower and upper new highs would be the
 802   // lower_high() and upper_high() respectively.
 803   char* unaligned_lower_new_high =
 804     MIN2(unaligned_new_high, lower_high_boundary());
 805   char* unaligned_middle_new_high =
 806     MIN2(unaligned_new_high, middle_high_boundary());
 807   char* unaligned_upper_new_high =
 808     MIN2(unaligned_new_high, upper_high_boundary());
 809 
  // Align the new highs based on each region's alignment.  lower and upper
 811   // alignment will always be default page size.  middle alignment will be
 812   // LargePageSizeInBytes if the actual size of the virtual space is in
 813   // fact larger than LargePageSizeInBytes.
 814   char* aligned_lower_new_high =
 815     (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
 816   char* aligned_middle_new_high =
 817     (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
 818   char* aligned_upper_new_high =
 819     (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
 820 
 821   // Determine which regions need to grow in this expand_by call.
 822   // If you are growing in the lower region, high() must be in that
 823   // region so calculate the size based on high().  For the middle and
 824   // upper regions, determine the starting point of growth based on the
 825   // location of high().  By getting the MAX of the region's low address
 826   // (or the previous region's high address) and high(), we can tell if it
 827   // is an intra or inter region growth.
 828   size_t lower_needs = 0;
 829   if (aligned_lower_new_high > lower_high()) {
 830     lower_needs =
 831       pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
 832   }
 833   size_t middle_needs = 0;
 834   if (aligned_middle_new_high > middle_high()) {
 835     middle_needs =
 836       pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
 837   }
 838   size_t upper_needs = 0;
 839   if (aligned_upper_new_high > upper_high()) {
 840     upper_needs =
 841       pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
 842   }
 843 
 844   // Check contiguity.
 845   assert(low_boundary() <= lower_high() &&
 846          lower_high() <= lower_high_boundary(),
 847          "high address must be contained within the region");
 848   assert(lower_high_boundary() <= middle_high() &&
 849          middle_high() <= middle_high_boundary(),
 850          "high address must be contained within the region");
 851   assert(middle_high_boundary() <= upper_high() &&
 852          upper_high() <= upper_high_boundary(),
 853          "high address must be contained within the region");
 854 
 855   // Commit regions
 856   if (lower_needs > 0) {
 857     assert(low_boundary() <= lower_high() &&
 858            lower_high() + lower_needs <= lower_high_boundary(),
 859            "must not expand beyond region");
 860     if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
 861       debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
 862                          ", lower_needs=" SIZE_FORMAT ", %d) failed",
 863                          p2i(lower_high()), lower_needs, _executable);)
 864       return false;
 865     } else {
 866       _lower_high += lower_needs;
 867     }
 868   }
 869   if (middle_needs > 0) {
 870     assert(lower_high_boundary() <= middle_high() &&
 871            middle_high() + middle_needs <= middle_high_boundary(),
 872            "must not expand beyond region");
 873     if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
 874                            _executable)) {
 875       debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
 876                          ", middle_needs=" SIZE_FORMAT ", " SIZE_FORMAT
 877                          ", %d) failed", p2i(middle_high()), middle_needs,
 878                          middle_alignment(), _executable);)
 879       return false;
 880     }
 881     _middle_high += middle_needs;
 882   }
 883   if (upper_needs > 0) {
 884     assert(middle_high_boundary() <= upper_high() &&
 885            upper_high() + upper_needs <= upper_high_boundary(),
 886            "must not expand beyond region");
 887     if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
 888       debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
 889                          ", upper_needs=" SIZE_FORMAT ", %d) failed",
 890                          p2i(upper_high()), upper_needs, _executable);)
 891       return false;
 892     } else {
 893       _upper_high += upper_needs;
 894     }
 895   }
 896 
 897   if (pre_touch || AlwaysPreTouch) {
 898     os::pretouch_memory(previous_high, unaligned_new_high);
 899   }
 900 
 901   _high += bytes;
 902   return true;
 903 }
 904 
// A page is uncommitted if the contents of the entire page are deemed unusable.
 906 // Continue to decrement the high() pointer until it reaches a page boundary
 907 // in which case that particular page can now be uncommitted.
 908 void VirtualSpace::shrink_by(size_t size) {
 909   if (committed_size() < size)
 910     fatal("Cannot shrink virtual space to negative size");
 911 
 912   if (special()) {
 913     // don't uncommit if the entire space is pinned in memory
 914     _high -= size;
 915     return;
 916   }
 917 
 918   char* unaligned_new_high = high() - size;
 919   assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");
 920 
 921   // Calculate new unaligned address
 922   char* unaligned_upper_new_high =
 923     MAX2(unaligned_new_high, middle_high_boundary());
 924   char* unaligned_middle_new_high =
 925     MAX2(unaligned_new_high, lower_high_boundary());
 926   char* unaligned_lower_new_high =
 927     MAX2(unaligned_new_high, low_boundary());
 928 
 929   // Align address to region's alignment
 930   char* aligned_upper_new_high =
 931     (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
 932   char* aligned_middle_new_high =
 933     (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
 934   char* aligned_lower_new_high =
 935     (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
 936 
 937   // Determine which regions need to shrink
 938   size_t upper_needs = 0;
 939   if (aligned_upper_new_high < upper_high()) {
 940     upper_needs =
 941       pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
 942   }
 943   size_t middle_needs = 0;
 944   if (aligned_middle_new_high < middle_high()) {
 945     middle_needs =
 946       pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
 947   }
 948   size_t lower_needs = 0;
 949   if (aligned_lower_new_high < lower_high()) {
 950     lower_needs =
 951       pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
 952   }
 953 
 954   // Check contiguity.
 955   assert(middle_high_boundary() <= upper_high() &&
 956          upper_high() <= upper_high_boundary(),
 957          "high address must be contained within the region");
 958   assert(lower_high_boundary() <= middle_high() &&
 959          middle_high() <= middle_high_boundary(),
 960          "high address must be contained within the region");
 961   assert(low_boundary() <= lower_high() &&
 962          lower_high() <= lower_high_boundary(),
 963          "high address must be contained within the region");
 964 
 965   // Uncommit
 966   if (upper_needs > 0) {
 967     assert(middle_high_boundary() <= aligned_upper_new_high &&
 968            aligned_upper_new_high + upper_needs <= upper_high_boundary(),
 969            "must not shrink beyond region");
 970     if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
 971       debug_only(warning("os::uncommit_memory failed"));
 972       return;
 973     } else {
 974       _upper_high -= upper_needs;
 975     }
 976   }
 977   if (middle_needs > 0) {
 978     assert(lower_high_boundary() <= aligned_middle_new_high &&
 979            aligned_middle_new_high + middle_needs <= middle_high_boundary(),
 980            "must not shrink beyond region");
 981     if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
 982       debug_only(warning("os::uncommit_memory failed"));
 983       return;
 984     } else {
 985       _middle_high -= middle_needs;
 986     }
 987   }
 988   if (lower_needs > 0) {
 989     assert(low_boundary() <= aligned_lower_new_high &&
 990            aligned_lower_new_high + lower_needs <= lower_high_boundary(),
 991            "must not shrink beyond region");
 992     if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
 993       debug_only(warning("os::uncommit_memory failed"));
 994       return;
 995     } else {
 996       _lower_high -= lower_needs;
 997     }
 998   }
 999 
1000   _high -= size;
1001 }
1002 
1003 #ifndef PRODUCT
1004 void VirtualSpace::check_for_contiguity() {
1005   // Check contiguity.
1006   assert(low_boundary() <= lower_high() &&
1007          lower_high() <= lower_high_boundary(),
1008          "high address must be contained within the region");
1009   assert(lower_high_boundary() <= middle_high() &&
1010          middle_high() <= middle_high_boundary(),
1011          "high address must be contained within the region");
1012   assert(middle_high_boundary() <= upper_high() &&
1013          upper_high() <= upper_high_boundary(),
1014          "high address must be contained within the region");
1015   assert(low() >= low_boundary(), "low");
1016   assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
1017   assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
1018   assert(high() <= upper_high(), "upper high");
1019 }
1020 
1021 void VirtualSpace::print_on(outputStream* out) {
1022   out->print   ("Virtual space:");
1023   if (special()) out->print(" (pinned in memory)");
1024   out->cr();
1025   out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
1026   out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
1027   out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low()), p2i(high()));
1028   out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low_boundary()), p2i(high_boundary()));
1029 }
1030 
1031 void VirtualSpace::print() {
1032   print_on(tty);
1033 }
1034 
1035 /////////////// Unit tests ///////////////
1036 
1037 #ifndef PRODUCT
1038 
1039 #define test_log(...) \
1040   do {\
1041     if (VerboseInternalVMTests) { \
1042       tty->print_cr(__VA_ARGS__); \
1043       tty->flush(); \
1044     }\
1045   } while (false)
1046 
1047 class TestReservedSpace : AllStatic {
1048  public:
1049   static void small_page_write(void* addr, size_t size) {
1050     size_t page_size = os::vm_page_size();
1051 
1052     char* end = (char*)addr + size;
1053     for (char* p = (char*)addr; p < end; p += page_size) {
1054       *p = 1;
1055     }
1056   }
1057 
1058   static void release_memory_for_test(ReservedSpace rs) {
1059     if (rs.special()) {
1060       guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
1061     } else {
1062       guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
1063     }
1064   }
1065 
1066   static void test_reserved_space1(size_t size, size_t alignment) {
1067     test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
1068 
1069     assert(is_size_aligned(size, alignment), "Incorrect input parameters");
1070 
1071     ReservedSpace rs(size,          // size
1072                      alignment,     // alignment
1073                      UseLargePages, // large
1074                      (char *)NULL); // requested_address
1075 
1076     test_log(" rs.special() == %d", rs.special());
1077 
1078     assert(rs.base() != NULL, "Must be");
1079     assert(rs.size() == size, "Must be");
1080 
1081     assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
1082     assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
1083 
1084     if (rs.special()) {
1085       small_page_write(rs.base(), size);
1086     }
1087 
1088     release_memory_for_test(rs);
1089   }
1090 
1091   static void test_reserved_space2(size_t size) {
1092     test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
1093 
1094     assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1095 
1096     ReservedSpace rs(size);
1097 
1098     test_log(" rs.special() == %d", rs.special());
1099 
1100     assert(rs.base() != NULL, "Must be");
1101     assert(rs.size() == size, "Must be");
1102 
1103     if (rs.special()) {
1104       small_page_write(rs.base(), size);
1105     }
1106 
1107     release_memory_for_test(rs);
1108   }
1109 
1110   static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
1111     test_log("test_reserved_space3(%p, %p, %d)",
1112         (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);
1113 
1114     assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1115     assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");
1116 
1117     bool large = maybe_large && UseLargePages && size >= os::large_page_size();
1118 
1119     ReservedSpace rs(size, alignment, large, false);
1120 
1121     test_log(" rs.special() == %d", rs.special());
1122 
1123     assert(rs.base() != NULL, "Must be");
1124     assert(rs.size() == size, "Must be");
1125 
1126     if (rs.special()) {
1127       small_page_write(rs.base(), size);
1128     }
1129 
1130     release_memory_for_test(rs);
1131   }
1132 
1133 
1134   static void test_reserved_space1() {
1135     size_t size = 2 * 1024 * 1024;
1136     size_t ag   = os::vm_allocation_granularity();
1137 
1138     test_reserved_space1(size,      ag);
1139     test_reserved_space1(size * 2,  ag);
1140     test_reserved_space1(size * 10, ag);
1141   }
1142 
1143   static void test_reserved_space2() {
1144     size_t size = 2 * 1024 * 1024;
1145     size_t ag = os::vm_allocation_granularity();
1146 
1147     test_reserved_space2(size * 1);
1148     test_reserved_space2(size * 2);
1149     test_reserved_space2(size * 10);
1150     test_reserved_space2(ag);
1151     test_reserved_space2(size - ag);
1152     test_reserved_space2(size);
1153     test_reserved_space2(size + ag);
1154     test_reserved_space2(size * 2);
1155     test_reserved_space2(size * 2 - ag);
1156     test_reserved_space2(size * 2 + ag);
1157     test_reserved_space2(size * 3);
1158     test_reserved_space2(size * 3 - ag);
1159     test_reserved_space2(size * 3 + ag);
1160     test_reserved_space2(size * 10);
1161     test_reserved_space2(size * 10 + size / 2);
1162   }
1163 
1164   static void test_reserved_space3() {
1165     size_t ag = os::vm_allocation_granularity();
1166 
1167     test_reserved_space3(ag,      ag    , false);
1168     test_reserved_space3(ag * 2,  ag    , false);
1169     test_reserved_space3(ag * 3,  ag    , false);
1170     test_reserved_space3(ag * 2,  ag * 2, false);
1171     test_reserved_space3(ag * 4,  ag * 2, false);
1172     test_reserved_space3(ag * 8,  ag * 2, false);
1173     test_reserved_space3(ag * 4,  ag * 4, false);
1174     test_reserved_space3(ag * 8,  ag * 4, false);
1175     test_reserved_space3(ag * 16, ag * 4, false);
1176 
1177     if (UseLargePages) {
1178       size_t lp = os::large_page_size();
1179 
1180       // Without large pages
1181       test_reserved_space3(lp,     ag * 4, false);
1182       test_reserved_space3(lp * 2, ag * 4, false);
1183       test_reserved_space3(lp * 4, ag * 4, false);
1184       test_reserved_space3(lp,     lp    , false);
1185       test_reserved_space3(lp * 2, lp    , false);
1186       test_reserved_space3(lp * 3, lp    , false);
1187       test_reserved_space3(lp * 2, lp * 2, false);
1188       test_reserved_space3(lp * 4, lp * 2, false);
1189       test_reserved_space3(lp * 8, lp * 2, false);
1190 
1191       // With large pages
1192       test_reserved_space3(lp, ag * 4    , true);
1193       test_reserved_space3(lp * 2, ag * 4, true);
1194       test_reserved_space3(lp * 4, ag * 4, true);
1195       test_reserved_space3(lp, lp        , true);
1196       test_reserved_space3(lp * 2, lp    , true);
1197       test_reserved_space3(lp * 3, lp    , true);
1198       test_reserved_space3(lp * 2, lp * 2, true);
1199       test_reserved_space3(lp * 4, lp * 2, true);
1200       test_reserved_space3(lp * 8, lp * 2, true);
1201     }
1202   }
1203 
1204   static void test_reserved_space() {
1205     test_reserved_space1();
1206     test_reserved_space2();
1207     test_reserved_space3();
1208   }
1209 };
1210 
1211 void TestReservedSpace_test() {
1212   TestReservedSpace::test_reserved_space();
1213 }
1214 
1215 #define assert_equals(actual, expected)  \
1216   assert(actual == expected,             \
1217          "Got " SIZE_FORMAT " expected " \
1218          SIZE_FORMAT, actual, expected);
1219 
1220 #define assert_ge(value1, value2)                  \
1221   assert(value1 >= value2,                         \
1222          "'" #value1 "': " SIZE_FORMAT " '"        \
1223          #value2 "': " SIZE_FORMAT, value1, value2);
1224 
1225 #define assert_lt(value1, value2)                  \
1226   assert(value1 < value2,                          \
1227          "'" #value1 "': " SIZE_FORMAT " '"        \
1228          #value2 "': " SIZE_FORMAT, value1, value2);
1229 
1230 
1231 class TestVirtualSpace : AllStatic {
1232   enum TestLargePages {
1233     Default,
1234     Disable,
1235     Reserve,
1236     Commit
1237   };
1238 
1239   static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
1240     switch(mode) {
1241     default:
1242     case Default:
1243     case Reserve:
1244       return ReservedSpace(reserve_size_aligned);
1245     case Disable:
1246     case Commit:
1247       return ReservedSpace(reserve_size_aligned,
1248                            os::vm_allocation_granularity(),
1249                            /* large */ false, /* exec */ false);
1250     }
1251   }
1252 
1253   static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
1254     switch(mode) {
1255     default:
1256     case Default:
1257     case Reserve:
1258       return vs.initialize(rs, 0);
1259     case Disable:
1260       return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
1261     case Commit:
1262       return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
1263     }
1264   }
1265 
1266  public:
1267   static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
1268                                                         TestLargePages mode = Default) {
1269     size_t granularity = os::vm_allocation_granularity();
1270     size_t reserve_size_aligned = align_size_up(reserve_size, granularity);
1271 
1272     ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
1273 
1274     assert(reserved.is_reserved(), "Must be");
1275 
1276     VirtualSpace vs;
1277     bool initialized = initialize_virtual_space(vs, reserved, mode);
1278     assert(initialized, "Failed to initialize VirtualSpace");
1279 
1280     vs.expand_by(commit_size, false);
1281 
1282     if (vs.special()) {
1283       assert_equals(vs.actual_committed_size(), reserve_size_aligned);
1284     } else {
1285       assert_ge(vs.actual_committed_size(), commit_size);
1286       // Approximate the commit granularity.
      // Make sure that we don't commit using large pages
      // if large pages have been disabled for this VirtualSpace.
1289       size_t commit_granularity = (mode == Disable || !UseLargePages) ?
1290                                    os::vm_page_size() : os::large_page_size();
1291       assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
1292     }
1293 
1294     reserved.release();
1295   }
1296 
1297   static void test_virtual_space_actual_committed_space_one_large_page() {
1298     if (!UseLargePages) {
1299       return;
1300     }
1301 
1302     size_t large_page_size = os::large_page_size();
1303 
1304     ReservedSpace reserved(large_page_size, large_page_size, true, false);
1305 
1306     assert(reserved.is_reserved(), "Must be");
1307 
1308     VirtualSpace vs;
1309     bool initialized = vs.initialize(reserved, 0);
1310     assert(initialized, "Failed to initialize VirtualSpace");
1311 
1312     vs.expand_by(large_page_size, false);
1313 
1314     assert_equals(vs.actual_committed_size(), large_page_size);
1315 
1316     reserved.release();
1317   }
1318 
1319   static void test_virtual_space_actual_committed_space() {
1320     test_virtual_space_actual_committed_space(4 * K, 0);
1321     test_virtual_space_actual_committed_space(4 * K, 4 * K);
1322     test_virtual_space_actual_committed_space(8 * K, 0);
1323     test_virtual_space_actual_committed_space(8 * K, 4 * K);
1324     test_virtual_space_actual_committed_space(8 * K, 8 * K);
1325     test_virtual_space_actual_committed_space(12 * K, 0);
1326     test_virtual_space_actual_committed_space(12 * K, 4 * K);
1327     test_virtual_space_actual_committed_space(12 * K, 8 * K);
1328     test_virtual_space_actual_committed_space(12 * K, 12 * K);
1329     test_virtual_space_actual_committed_space(64 * K, 0);
1330     test_virtual_space_actual_committed_space(64 * K, 32 * K);
1331     test_virtual_space_actual_committed_space(64 * K, 64 * K);
1332     test_virtual_space_actual_committed_space(2 * M, 0);
1333     test_virtual_space_actual_committed_space(2 * M, 4 * K);
1334     test_virtual_space_actual_committed_space(2 * M, 64 * K);
1335     test_virtual_space_actual_committed_space(2 * M, 1 * M);
1336     test_virtual_space_actual_committed_space(2 * M, 2 * M);
1337     test_virtual_space_actual_committed_space(10 * M, 0);
1338     test_virtual_space_actual_committed_space(10 * M, 4 * K);
1339     test_virtual_space_actual_committed_space(10 * M, 8 * K);
1340     test_virtual_space_actual_committed_space(10 * M, 1 * M);
1341     test_virtual_space_actual_committed_space(10 * M, 2 * M);
1342     test_virtual_space_actual_committed_space(10 * M, 5 * M);
1343     test_virtual_space_actual_committed_space(10 * M, 10 * M);
1344   }
1345 
1346   static void test_virtual_space_disable_large_pages() {
1347     if (!UseLargePages) {
1348       return;
1349     }
    // These test cases verify the committed size accounting when we force
    // VirtualSpace to disable large pages.
1351     test_virtual_space_actual_committed_space(10 * M, 0, Disable);
1352     test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
1353     test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
1354     test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
1355     test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
1356     test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
1357     test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);
1358 
1359     test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
1360     test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
1361     test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
1362     test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
1363     test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
1364     test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
1365     test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);
1366 
1367     test_virtual_space_actual_committed_space(10 * M, 0, Commit);
1368     test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
1369     test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
1370     test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
1371     test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
1372     test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
1373     test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
1374   }
1375 
1376   static void test_virtual_space() {
1377     test_virtual_space_actual_committed_space();
1378     test_virtual_space_actual_committed_space_one_large_page();
1379     test_virtual_space_disable_large_pages();
1380   }
1381 };
1382 
1383 void TestVirtualSpace_test() {
1384   TestVirtualSpace::test_virtual_space();
1385 }
1386 
1387 #endif // PRODUCT
1388 
1389 #endif