1 /*
   2  * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "logging/log.hpp"
  27 #include "memory/resourceArea.hpp"
  28 #include "memory/virtualspace.hpp"
  29 #include "oops/markOop.hpp"
  30 #include "oops/oop.inline.hpp"
  31 #include "runtime/os.inline.hpp"
  32 #include "services/memTracker.hpp"
  33 #include "utilities/align.hpp"
  34 
  35 // ReservedSpace
  36 
  37 // Dummy constructor
  38 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
  39     _alignment(0), _special(false), _executable(false), _fd_for_heap(-1) {
  40 }
  41 
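     // Reserve 'size' bytes, preferring the given page size; if no preferred
     // page size is given, pick the best page size for a region of this size.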
  42 ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1) {
  43   bool has_preferred_page_size = preferred_page_size != 0;
  44   // Want to use large pages where possible and pad with small pages.
  45   size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
  46   bool large_pages = page_size != (size_t)os::vm_page_size();
  47   size_t alignment;
  48   if (large_pages && has_preferred_page_size) {
  49     alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
  50     // ReservedSpace initialization requires size to be aligned to the given
  51     // alignment. Align the size up.
  52     size = align_up(size, alignment);
  53   } else {
  54     // Don't force the alignment to be large page aligned,
  55     // since that will waste memory.
  56     alignment = os::vm_allocation_granularity();
  57   }
  58   initialize(size, alignment, large_pages, NULL, false);
  59 }
  60 
  61 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  62                              bool large,
  63                              char* requested_address) : _fd_for_heap(-1) {
  64   initialize(size, alignment, large, requested_address, false);
  65 }
  66 
  67 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  68                              bool large,
  69                              bool executable) : _fd_for_heap(-1) {
  70   initialize(size, alignment, large, NULL, executable);
  71 }
  72 
  73 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
  74                              bool special, bool executable) : _fd_for_heap(-1) {
  75   assert((size % os::vm_allocation_granularity()) == 0,
  76          "size not allocation aligned");
  77   _base = base;
  78   _size = size;
  79   _alignment = alignment;
  80   _noaccess_prefix = 0;
  81   _special = special;
  82   _executable = executable;
  83 }
  84 
  85 // Helper: unmap file-mapped memory or release anonymously reserved memory.
  86 static void unmap_or_release_memory(char* base, size_t size, bool is_file_mapped) {
  87   if (is_file_mapped) {
  88     if (!os::unmap_memory(base, size)) {
  89       fatal("os::unmap_memory failed");
  90     }
  91   } else if (!os::release_memory(base, size)) {
  92     fatal("os::release_memory failed");
  93   }
  94 }
  95 
  96 // Helper: returns true (and frees the reservation) if it missed the requested address.
  97 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
  98                                            const size_t size, bool special, bool is_file_mapped = false)
  99 {
 100   if (base == requested_address || requested_address == NULL)
 101     return false; // did not fail
 102 
 103   if (base != NULL) {
 104     // A different reserve address may be acceptable in other cases,
 105     // but for compressed oops the heap should be at the requested address.
 106     assert(UseCompressedOops, "currently requested address used only for compressed oops");
 107     log_debug(gc, heap, coops)("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address));
 108     // OS ignored requested address. Try different address.
 109     if (special) {
 110       if (!os::release_memory_special(base, size)) {
 111         fatal("os::release_memory_special failed");
 112       }
 113     } else {
 114       unmap_or_release_memory(base, size, is_file_mapped);
 115     }
 116   }
 117   return true;
 118 }
 119 
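     // Reserve 'size' bytes with the given alignment, optionally at
     // 'requested_address'. If 'large' is set and the OS cannot commit large
     // pages on demand, the region is reserved and pinned up front.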
 120 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
 121                                char* requested_address,
 122                                bool executable) {
 123   const size_t granularity = os::vm_allocation_granularity();
 124   assert((size & (granularity - 1)) == 0,
 125          "size not aligned to os::vm_allocation_granularity()");
 126   assert((alignment & (granularity - 1)) == 0,
 127          "alignment not aligned to os::vm_allocation_granularity()");
 128   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 129          "not a power of 2");
 130 
 131   alignment = MAX2(alignment, (size_t)os::vm_page_size());
 132 
 133   _base = NULL;
 134   _size = 0;
 135   _special = false;
 136   _executable = executable;
 137   _alignment = 0;
 138   _noaccess_prefix = 0;
 139   if (size == 0) {
 140     return;
 141   }
 142 
 143   // If the OS doesn't support demand paging for large page memory, we need
 144   // to use reserve_memory_special() to reserve and pin the entire region.
 145   // If there is a backing file directory for this space, then whether
 146   // large pages are allocated is up to the filesystem of the backing file,
 147   // so we ignore the UseLargePages flag in this case.
 148   bool special = large && !os::can_commit_large_page_memory();
 149   if (special && _fd_for_heap != -1) {
 150     special = false;
 151     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 152       !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 153       log_debug(gc, heap)("Ignoring UseLargePages since large page support is up to the file system of the backing file for Java heap");
 154     }
 155   }
 156 
 157   char* base = NULL;
 158 
 159   if (special) {
 160 
 161     base = os::reserve_memory_special(size, alignment, requested_address, executable);
 162 
 163     if (base != NULL) {
 164       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
 165         // OS ignored requested address. Try different address.
 166         return;
 167       }
 168       // Check alignment constraints.
 169       assert((uintptr_t) base % alignment == 0,
 170              "Large pages returned a non-aligned address, base: "
 171              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 172              p2i(base), alignment);
 173       _special = true;
 174     } else {
 175       // failed; try to reserve regular memory below
 176       if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 177                             !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 178         log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 179       }
 180     }
 181   }
 182 
 183   if (base == NULL) {
 184     // Optimistically assume that the OS returns an aligned base pointer.
 185     // When reserving a large address range, most OSes seem to align to at
 186     // least 64K.
 187 
 188     // If the memory was requested at a particular address, use
 189     // os::attempt_reserve_memory_at() to avoid mapping over something
 190     // important.  If available space is not detected, return NULL.
 191 
 192     if (requested_address != 0) {
 193       base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
 194       if (failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
 195         // OS ignored requested address. Try different address.
 196         base = NULL;
 197       }
 198     } else {
 199       base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
 200     }
 201 
 202     if (base == NULL) return;
 203 
 204     // Check alignment constraints
 205     if ((((size_t)base) & (alignment - 1)) != 0) {
 206       // Base not aligned, retry
 207       unmap_or_release_memory(base, size, _fd_for_heap != -1 /*is_file_mapped*/);
 208 
 209       // Make sure that size is aligned
 210       size = align_up(size, alignment);
 211       base = os::reserve_memory_aligned(size, alignment, _fd_for_heap);
 212 
 213       if (requested_address != 0 &&
 214           failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
 215         // As a result of the alignment constraints, the allocated base differs
 216         // from the requested address. Return back to the caller who can
 217         // take remedial action (like try again without a requested address).
 218         assert(_base == NULL, "should be");
 219         return;
 220       }
 221     }
 222   }
 223   // Done
 224   _base = base;
 225   _size = size;
 226   _alignment = alignment;
 227   // If the heap is reserved with a backing file, the entire space has been committed, so set the _special flag to true.
 228   if (_fd_for_heap != -1) {
 229     _special = true;
 230   }
 231 }
 232 
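     // Carve the first 'partition_size' bytes out of this space as a separate
     // ReservedSpace. If 'split' is set, the underlying OS reservation is split
     // as well (where the platform needs it) so the parts can be released
     // independently.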
 233 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
 234                                         bool split, bool realloc) {
 235   assert(partition_size <= size(), "partition failed");
 236   if (split) {
 237     os::split_reserved_memory(base(), size(), partition_size, realloc);
 238   }
 239   ReservedSpace result(base(), partition_size, alignment, special(),
 240                        executable());
 241   return result;
 242 }
 243 
 244 
 245 ReservedSpace
 246 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
 247   assert(partition_size <= size(), "partition failed");
 248   ReservedSpace result(base() + partition_size, size() - partition_size,
 249                        alignment, special(), executable());
 250   return result;
 251 }
 252 
 253 
 254 size_t ReservedSpace::page_align_size_up(size_t size) {
 255   return align_up(size, os::vm_page_size());
 256 }
 257 
 258 
 259 size_t ReservedSpace::page_align_size_down(size_t size) {
 260   return align_down(size, os::vm_page_size());
 261 }
 262 
 263 
 264 size_t ReservedSpace::allocation_align_size_up(size_t size) {
 265   return align_up(size, os::vm_allocation_granularity());
 266 }
 267 
 268 
 269 size_t ReservedSpace::allocation_align_size_down(size_t size) {
 270   return align_down(size, os::vm_allocation_granularity());
 271 }
 272 
 273 
 274 void ReservedSpace::release() {
 275   if (is_reserved()) {
 276     char *real_base = _base - _noaccess_prefix;
 277     const size_t real_size = _size + _noaccess_prefix;
 278     if (special()) {
 279       if (_fd_for_heap != -1) {
 280         os::unmap_memory(real_base, real_size);
 281       } else {
 282         os::release_memory_special(real_base, real_size);
 283       }
 284     } else {
 285       os::release_memory(real_base, real_size);
 286     }
 287     _base = NULL;
 288     _size = 0;
 289     _noaccess_prefix = 0;
 290     _alignment = 0;
 291     _special = false;
 292     _executable = false;
 293   }
 294 }
 295 
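     // The noaccess prefix must span whole pages (so it can be protected) and
     // must keep the heap base aligned, hence the least common multiple below.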
 296 static size_t noaccess_prefix_size(size_t alignment) {
 297   return lcm(os::vm_page_size(), alignment);
 298 }
 299 
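     // Establish a protected (no-access) prefix below the heap base. Decoding a
     // compressed NULL then yields an address inside this protected range, so
     // the access faults and the VM can use implicit null checks.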
 300 void ReservedHeapSpace::establish_noaccess_prefix() {
 301   assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
 302   _noaccess_prefix = noaccess_prefix_size(_alignment);
 303 
 304   if (base() && base() + _size > (char *)OopEncodingHeapMax) {
 305     if (true
 306         WIN64_ONLY(&& !UseLargePages)
 307         AIX_ONLY(&& os::vm_page_size() != 64*K)) {
 308       // Protect memory at the base of the allocated region.
 309       // If special, the page was already committed (only matters on Windows).
 310       if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
 311         fatal("cannot protect protection page");
 312       }
 313       log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
 314                                  PTR_FORMAT " / " INTX_FORMAT " bytes",
 315                                  p2i(_base),
 316                                  _noaccess_prefix);
 317       assert(Universe::narrow_oop_use_implicit_null_checks() == true, "not initialized?");
 318     } else {
 319       Universe::set_narrow_oop_use_implicit_null_checks(false);
 320     }
 321   }
 322 
 323   _base += _noaccess_prefix;
 324   _size -= _noaccess_prefix;
 325   assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
 326 }
 327 
 328 // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
 329 // Does not check whether the reserved memory actually is at requested_address, as the memory returned
 330 // might still fulfill the wishes of the caller.
 331 // Assures the memory is aligned to 'alignment'.
 332 // NOTE: If ReservedHeapSpace already points to some reserved memory, it is freed first.
 333 void ReservedHeapSpace::try_reserve_heap(size_t size,
 334                                          size_t alignment,
 335                                          bool large,
 336                                          char* requested_address) {
 337   if (_base != NULL) {
 338     // We tried before, but we didn't like the address delivered.
 339     release();
 340   }
 341 
 342   // If the OS doesn't support demand paging for large page memory, we need
 343   // to use reserve_memory_special() to reserve and pin the entire region.
 344   // If there is a backing file directory for this space, then whether
 345   // large pages are allocated is up to the filesystem of the backing file,
 346   // so we ignore the UseLargePages flag in this case.
 347   bool special = large && !os::can_commit_large_page_memory();
 348   if (special && _fd_for_heap != -1) {
 349     special = false;
 350     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 351                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 352       log_debug(gc, heap)("Cannot allocate large pages for Java Heap when AllocateHeapAt option is set.");
 353     }
 354   }
 355   char* base = NULL;
 356 
 357   log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
 358                              " heap of size " SIZE_FORMAT_HEX,
 359                              p2i(requested_address),
 360                              size);
 361 
 362   if (special) {
 363     base = os::reserve_memory_special(size, alignment, requested_address, false);
 364 
 365     if (base != NULL) {
 366       // Check alignment constraints.
 367       assert((uintptr_t) base % alignment == 0,
 368              "Large pages returned a non-aligned address, base: "
 369              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 370              p2i(base), alignment);
 371       _special = true;
 372     }
 373   }
 374 
 375   if (base == NULL) {
 376     // Failed; try to reserve regular memory below
 377     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 378                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 379       log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 380     }
 381 
 382     // Optimistically assume that the OS returns an aligned base pointer.
 383     // When reserving a large address range, most OSes seem to align to at
 384     // least 64K.
 385 
 386     // If the memory was requested at a particular address, use
 387     // os::attempt_reserve_memory_at() to avoid mapping over something
 388     // important.  If available space is not detected, return NULL.
 389 
 390     if (requested_address != 0) {
 391       base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
 392     } else {
 393       base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
 394     }
 395   }
 396   if (base == NULL) { return; }
 397 
 398   // Done
 399   _base = base;
 400   _size = size;
 401   _alignment = alignment;
 402 
 403   // If the heap is reserved with a backing file, the entire space has been committed, so set the _special flag to true.
 404   if (_fd_for_heap != -1) {
 405     _special = true;
 406   }
 407 
 408   // Check alignment constraints
 409   if ((((size_t)base) & (alignment - 1)) != 0) {
 410     // Base not aligned, retry.
 411     release();
 412   }
 413 }
 414 
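     // Walk candidate attach points from highest_start down to lowest_start in
     // steps that are multiples of the attach point alignment, stopping early
     // once a reservation satisfies the caller's bounds.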
 415 void ReservedHeapSpace::try_reserve_range(char *highest_start,
 416                                           char *lowest_start,
 417                                           size_t attach_point_alignment,
 418                                           char *aligned_heap_base_min_address,
 419                                           char *upper_bound,
 420                                           size_t size,
 421                                           size_t alignment,
 422                                           bool large) {
 423   const size_t attach_range = highest_start - lowest_start;
 424   // Cap num_attempts at the number of possible attach points.
 425   // At least one attempt is possible even for a zero-sized attach range.
 426   const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
 427   const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);
 428 
 429   const size_t stepsize = (attach_range == 0) ? // Only one try.
 430     (size_t) highest_start : align_up(attach_range / num_attempts_to_try, attach_point_alignment);
 431 
 432   // Try attach points from top to bottom.
 433   char* attach_point = highest_start;
 434   while (attach_point >= lowest_start  &&
 435          attach_point <= highest_start &&  // Avoid wrap around.
 436          ((_base == NULL) ||
 437           (_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
 438     try_reserve_heap(size, alignment, large, attach_point);
 439     attach_point -= stepsize;
 440   }
 441 }
 442 
 443 #define SIZE_64K  ((uint64_t) UCONST64(      0x10000))
 444 #define SIZE_256M ((uint64_t) UCONST64(   0x10000000))
 445 #define SIZE_32G  ((uint64_t) UCONST64(  0x800000000))
 446 
 447 // Helper for heap allocation. Returns an array with addresses
 448 // (OS-specific) which are suited for disjoint base mode. Array is
 449 // NULL terminated.
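     // Note: all non-zero entries are multiples of 32G, i.e. of OopEncodingHeapMax
     // for the default 3-bit shift, so a heap based at one of them shares no
     // address bits with a shifted narrow oop (the disjoint base property).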
 450 static char** get_attach_addresses_for_disjoint_mode() {
 451   static uint64_t addresses[] = {
 452      2 * SIZE_32G,
 453      3 * SIZE_32G,
 454      4 * SIZE_32G,
 455      8 * SIZE_32G,
 456     10 * SIZE_32G,
 457      1 * SIZE_64K * SIZE_32G,
 458      2 * SIZE_64K * SIZE_32G,
 459      3 * SIZE_64K * SIZE_32G,
 460      4 * SIZE_64K * SIZE_32G,
 461     16 * SIZE_64K * SIZE_32G,
 462     32 * SIZE_64K * SIZE_32G,
 463     34 * SIZE_64K * SIZE_32G,
 464     0
 465   };
 466 
 467   // Skip addresses below OopEncodingHeapMax or HeapBaseMinAddress. This
 468   // assumes the array is sorted in ascending order.
 469   uint i = 0;
 470   while (addresses[i] != 0 &&
 471          (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
 472     i++;
 473   }
 474   uint start = i;
 475 
 476   // Avoid more steps than requested.
 477   i = 0;
 478   while (addresses[start+i] != 0) {
 479     if (i == HeapSearchSteps) {
 480       addresses[start+i] = 0;
 481       break;
 482     }
 483     i++;
 484   }
 485 
 486   return (char**) &addresses[start];
 487 }
 488 
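     // Placement is attempted in order of decreasing optimization potential:
     //   1. unscaled   - heap ends below UnscaledOopHeapMax; decoding needs neither base nor shift
     //   2. zerobased  - heap ends below OopEncodingHeapMax; decoding needs only a shift
     //   3. disjoint   - heap base is aligned to OopEncodingHeapMax and shares no bits with shifted oops
     //   4. heap-based - arbitrary base; a noaccess prefix enables implicit null checks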
 489 void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, bool large) {
 490   guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
 491             "cannot allocate compressed oop heap for this size");
 492   guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
 493   assert(HeapBaseMinAddress > 0, "sanity");
 494 
 495   const size_t granularity = os::vm_allocation_granularity();
 496   assert((size & (granularity - 1)) == 0,
 497          "size not aligned to os::vm_allocation_granularity()");
 498   assert((alignment & (granularity - 1)) == 0,
 499          "alignment not aligned to os::vm_allocation_granularity()");
 500   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 501          "not a power of 2");
 502 
 503   // The necessary attach point alignment for generated wish addresses.
 504   // This is needed to increase the chance of attaching for mmap and shmat.
 505   const size_t os_attach_point_alignment =
 506     AIX_ONLY(SIZE_256M)  // Known shm boundary alignment.
 507     NOT_AIX(os::vm_allocation_granularity());
 508   const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
 509 
 510   char *aligned_heap_base_min_address = (char *)align_up((void *)HeapBaseMinAddress, alignment);
 511   size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
 512     noaccess_prefix_size(alignment) : 0;
 513 
 514   // Attempt to alloc at user-given address.
 515   if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
 516     try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_heap_base_min_address);
 517     if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
 518       release();
 519     }
 520   }
 521 
 522   // Keep heap at HeapBaseMinAddress.
 523   if (_base == NULL) {
 524 
 525     // Try to allocate the heap at addresses that allow efficient oop compression.
 526     // Different schemes are tried, in order of decreasing optimization potential.
 527     //
 528     // For this, try_reserve_heap() is called with the desired heap base addresses.
 529     // A call into the os layer to allocate at a given address can return memory
 530     // at a different address than requested.  Still, this might be memory at a useful
 531     // address. try_reserve_heap() always keeps this allocated memory, since the
 532     // criteria for a good heap are only checked here in initialize_compressed_heap.
 533 
 534     // Attempt to allocate so that we can run without base and scale (32-Bit unscaled compressed oops).
 535     // Give it several tries from top of range to bottom.
 536     if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {
 537 
 538       // Calculate the address range within which we try to attach (range of possible start addresses).
 539       char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
 540       char* const lowest_start  = align_up(aligned_heap_base_min_address, attach_point_alignment);
 541       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 542                         aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
 543     }
 544 
 545     // zerobased: Attempt to allocate in the lower 32G.
 546     // But leave room for the compressed class pointers, which is allocated above
 547     // the heap.
 548     char *zerobased_max = (char *)OopEncodingHeapMax;
 549     const size_t class_space = align_up(CompressedClassSpaceSize, alignment);
 550     // For small heaps, save some space for compressed class pointer
 551     // space so it can be decoded with no base.
 552     if (UseCompressedClassPointers && !UseSharedSpaces &&
 553         OopEncodingHeapMax <= KlassEncodingMetaspaceMax &&
 554         (uint64_t)(aligned_heap_base_min_address + size + class_space) <= KlassEncodingMetaspaceMax) {
 555       zerobased_max = (char *)OopEncodingHeapMax - class_space;
 556     }
 557 
 558     // Give it several tries from top of range to bottom.
 559     if (aligned_heap_base_min_address + size <= zerobased_max &&    // Zerobased theoretically possible.
 560         ((_base == NULL) ||                        // No previous try succeeded.
 561          (_base + size > zerobased_max))) {        // Unscaled delivered an arbitrary address.
 562 
 563       // Calculate the address range within which we try to attach (range of possible start addresses).
 564       char *const highest_start = align_down(zerobased_max - size, attach_point_alignment);
 565       // Be careful: size is not guaranteed to be less than UnscaledOopHeapMax
 566       // due to type constraints, so the subtraction below may wrap around.
 567       char *lowest_start = aligned_heap_base_min_address;
 568       uint64_t unscaled_end = UnscaledOopHeapMax - size;
 569       if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
 570         lowest_start = MAX2(lowest_start, (char*)unscaled_end);
 571       }
 572       lowest_start = align_up(lowest_start, attach_point_alignment);
 573       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 574                         aligned_heap_base_min_address, zerobased_max, size, alignment, large);
 575     }
 576 
 577     // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
 578     // implement null checks.
 579     noaccess_prefix = noaccess_prefix_size(alignment);
 580 
 581     // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjoint base mode.
 582     char** addresses = get_attach_addresses_for_disjoint_mode();
 583     int i = 0;
 584     while (addresses[i] &&                                 // End of array not yet reached.
 585            ((_base == NULL) ||                             // No previous try succeeded.
 586             (_base + size >  (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
 587              !Universe::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
 588       char* const attach_point = addresses[i];
 589       assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
 590       try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
 591       i++;
 592     }
 593 
 594     // Last, desperate try without any placement.
 595     if (_base == NULL) {
 596       log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix);
 597       initialize(size + noaccess_prefix, alignment, large, NULL, false);
 598     }
 599   }
 600 }
 601 
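     // Reserve space for the Java heap. If 'heap_allocation_directory' is non-NULL,
     // the heap is backed by a file created in that directory (AllocateHeapAt).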
 602 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* heap_allocation_directory) : ReservedSpace() {
 603 
 604   if (size == 0) {
 605     return;
 606   }
 607 
 608   if (heap_allocation_directory != NULL) {
 609     _fd_for_heap = os::create_file_for_heap(heap_allocation_directory);
 610     if (_fd_for_heap == -1) {
 611       vm_exit_during_initialization(
 612         err_msg("Could not create file for Heap at location %s", heap_allocation_directory));
 613     }
 614   }
 615 
 616   // Heap size should be aligned to alignment, too.
 617   guarantee(is_aligned(size, alignment), "set by caller");
 618 
 619   if (UseCompressedOops) {
 620     initialize_compressed_heap(size, alignment, large);
 621     if (_size > size) {
 622       // We allocated heap with noaccess prefix.
 623       // It can happen we get a zerobased/unscaled heap with noaccess prefix,
 624       // if we had to try at arbitrary address.
 625       establish_noaccess_prefix();
 626     }
 627   } else {
 628     initialize(size, alignment, large, NULL, false);
 629   }
 630 
 631   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
 632          "area must be distinguishable from marks for mark-sweep");
 633   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
 634          "area must be distinguishable from marks for mark-sweep");
 635 
 636   if (base() != NULL) {
 637     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
 638   }
 639 
 640   if (_fd_for_heap != -1) {
 641     os::close(_fd_for_heap);
 642   }
 643 }
 644 
 645 // Reserve space for the code segment.  Same as the Java heap, except that we
 646 // mark this space as executable.
 647 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
 648                                      size_t rs_align,
 649                                      bool large) :
 650   ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
 651   MemTracker::record_virtual_memory_type((address)base(), mtCode);
 652 }
 653 
 654 // VirtualSpace
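     //
     // Illustrative usage sketch (mirrors the unit tests at the end of this file):
     //
     //   ReservedSpace rs(64 * K);        // reserve an address range
     //   VirtualSpace vs;
     //   if (vs.initialize(rs, 0)) {      // map the space, commit nothing yet
     //     vs.expand_by(32 * K, false);   // commit the first 32K
     //     vs.shrink_by(16 * K);          // uncommit the last 16K again
     //   }
     //   rs.release();                    // VirtualSpace::release() does not unreserve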
 655 
 656 VirtualSpace::VirtualSpace() {
 657   _low_boundary           = NULL;
 658   _high_boundary          = NULL;
 659   _low                    = NULL;
 660   _high                   = NULL;
 661   _lower_high             = NULL;
 662   _middle_high            = NULL;
 663   _upper_high             = NULL;
 664   _lower_high_boundary    = NULL;
 665   _middle_high_boundary   = NULL;
 666   _upper_high_boundary    = NULL;
 667   _lower_alignment        = 0;
 668   _middle_alignment       = 0;
 669   _upper_alignment        = 0;
 670   _special                = false;
 671   _executable             = false;
 672 }
 673 
 674 
 675 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
 676   const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
 677   return initialize_with_granularity(rs, committed_size, max_commit_granularity);
 678 }
 679 
 680 bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
 681   if(!rs.is_reserved()) return false;  // allocation failed.
 682   assert(_low_boundary == NULL, "VirtualSpace already initialized");
 683   assert(max_commit_granularity > 0, "Granularity must be non-zero.");
 684 
 685   _low_boundary  = rs.base();
 686   _high_boundary = low_boundary() + rs.size();
 687 
 688   _low = low_boundary();
 689   _high = low();
 690 
 691   _special = rs.special();
 692   _executable = rs.executable();
 693 
 694   // When a VirtualSpace begins life at a large size, make all future expansion
 695   // and shrinking occur aligned to a granularity of large pages.  This avoids
 696   // fragmentation of physical addresses that inhibits the use of large pages
 697   // by the OS virtual memory system.  Empirically, we see that with a 4MB
 698   // page size, the only spaces that get handled this way are codecache and
 699   // the heap itself, both of which provide a substantial performance
 700   // boost in many benchmarks when covered by large pages.
 701   //
 702   // No attempt is made to force large page alignment at the very top and
 703   // bottom of the space if they are not already aligned that way.
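       //
       // For example (illustrative): with a 4K page size and a 2M commit
       // granularity, the lower region covers the pages from low_boundary() up
       // to the first 2M boundary, the middle region covers the whole 2M-aligned
       // pages in between, and the upper region covers the tail after the last
       // 2M boundary; only the middle region is committed in 2M chunks.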
 704   _lower_alignment  = os::vm_page_size();
 705   _middle_alignment = max_commit_granularity;
 706   _upper_alignment  = os::vm_page_size();
 707 
 708   // End of each region
 709   _lower_high_boundary = align_up(low_boundary(), middle_alignment());
 710   _middle_high_boundary = align_down(high_boundary(), middle_alignment());
 711   _upper_high_boundary = high_boundary();
 712 
 713   // High address of each region
 714   _lower_high = low_boundary();
 715   _middle_high = lower_high_boundary();
 716   _upper_high = middle_high_boundary();
 717 
 718   // commit to initial size
 719   if (committed_size > 0) {
 720     if (!expand_by(committed_size)) {
 721       return false;
 722     }
 723   }
 724   return true;
 725 }
 726 
 727 
 728 VirtualSpace::~VirtualSpace() {
 729   release();
 730 }
 731 
 732 
 733 void VirtualSpace::release() {
 734   // This does not release the underlying reserved memory;
 735   // the caller must release it via rs.release().
 736   _low_boundary           = NULL;
 737   _high_boundary          = NULL;
 738   _low                    = NULL;
 739   _high                   = NULL;
 740   _lower_high             = NULL;
 741   _middle_high            = NULL;
 742   _upper_high             = NULL;
 743   _lower_high_boundary    = NULL;
 744   _middle_high_boundary   = NULL;
 745   _upper_high_boundary    = NULL;
 746   _lower_alignment        = 0;
 747   _middle_alignment       = 0;
 748   _upper_alignment        = 0;
 749   _special                = false;
 750   _executable             = false;
 751 }
 752 
 753 
 754 size_t VirtualSpace::committed_size() const {
 755   return pointer_delta(high(), low(), sizeof(char));
 756 }
 757 
 758 
 759 size_t VirtualSpace::reserved_size() const {
 760   return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
 761 }
 762 
 763 
 764 size_t VirtualSpace::uncommitted_size()  const {
 765   return reserved_size() - committed_size();
 766 }
 767 
 768 size_t VirtualSpace::actual_committed_size() const {
 769   // Special VirtualSpaces commit all reserved space up front.
 770   if (special()) {
 771     return reserved_size();
 772   }
 773 
 774   size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
 775   size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
 776   size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));
 777 
 778 #ifdef ASSERT
 779   size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
 780   size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
 781   size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));
 782 
 783   if (committed_high > 0) {
 784     assert(committed_low == lower, "Must be");
 785     assert(committed_middle == middle, "Must be");
 786   }
 787 
 788   if (committed_middle > 0) {
 789     assert(committed_low == lower, "Must be");
 790   }
 791   if (committed_middle < middle) {
 792     assert(committed_high == 0, "Must be");
 793   }
 794 
 795   if (committed_low < lower) {
 796     assert(committed_high == 0, "Must be");
 797     assert(committed_middle == 0, "Must be");
 798   }
 799 #endif
 800 
 801   return committed_low + committed_middle + committed_high;
 802 }
 803 
 804 
 805 bool VirtualSpace::contains(const void* p) const {
 806   return low() <= (const char*) p && (const char*) p < high();
 807 }
 808 
 809 static void pretouch_expanded_memory(void* start, void* end) {
 810   assert(is_aligned(start, os::vm_page_size()), "Unexpected alignment");
 811   assert(is_aligned(end,   os::vm_page_size()), "Unexpected alignment");
 812 
 813   os::pretouch_memory(start, end);
 814 }
 815 
 816 static bool commit_expanded(char* start, size_t size, size_t alignment, bool pre_touch, bool executable) {
 817   if (os::commit_memory(start, size, alignment, executable)) {
 818     if (pre_touch || AlwaysPreTouch) {
 819       pretouch_expanded_memory(start, start + size);
 820     }
 821     return true;
 822   }
 823 
 824   debug_only(warning(
 825       "INFO: os::commit_memory(" PTR_FORMAT ", " PTR_FORMAT
 826       " size=" SIZE_FORMAT ", executable=%d) failed",
 827       p2i(start), p2i(start + size), size, executable);)
 828 
 829   return false;
 830 }
 831 
 832 /*
 833    First we need to determine if a particular virtual space is using large
 834    pages.  This is done in the initialize function, and only virtual spaces
 835    that are larger than LargePageSizeInBytes use large pages.  Once we
 836    have determined this, all expand_by and shrink_by calls must grow and
 837    shrink by large page size chunks.  If a particular request
 838    is within the current large page, the call to commit or uncommit memory
 839    can be skipped.  In the case that the low and high boundaries of this
 840    space are not large page aligned, the pages leading to the first large
 841    page address and the pages after the last large page address must be
 842    allocated with default pages.
 843 */
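     // For example (illustrative): with a 2M middle alignment, expanding by 12K
     // inside the middle region commits a whole 2M chunk, because the new high
     // is aligned up to the next large page boundary.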
 844 bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
 845   if (uncommitted_size() < bytes) {
 846     return false;
 847   }
 848 
 849   if (special()) {
 850     // don't commit memory if the entire space is pinned in memory
 851     _high += bytes;
 852     return true;
 853   }
 854 
 855   char* previous_high = high();
 856   char* unaligned_new_high = high() + bytes;
 857   assert(unaligned_new_high <= high_boundary(), "cannot expand by more than upper boundary");
 858 
 859   // Calculate where the new high for each of the regions should be.  If
 860   // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
 861   // then the unaligned lower and upper new highs would be the
 862   // lower_high() and upper_high() respectively.
 863   char* unaligned_lower_new_high =  MIN2(unaligned_new_high, lower_high_boundary());
 864   char* unaligned_middle_new_high = MIN2(unaligned_new_high, middle_high_boundary());
 865   char* unaligned_upper_new_high =  MIN2(unaligned_new_high, upper_high_boundary());
 866 
 867   // Align the new highs based on each region's alignment.  Lower and upper
 868   // alignment will always be the default page size.  Middle alignment will be
 869   // LargePageSizeInBytes if the actual size of the virtual space is in
 870   // fact larger than LargePageSizeInBytes.
 871   char* aligned_lower_new_high =  align_up(unaligned_lower_new_high, lower_alignment());
 872   char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
 873   char* aligned_upper_new_high =  align_up(unaligned_upper_new_high, upper_alignment());
 874 
 875   // Determine which regions need to grow in this expand_by call.
 876   // If you are growing in the lower region, high() must be in that
 877   // region so calculate the size based on high().  For the middle and
 878   // upper regions, determine the starting point of growth based on the
 879   // location of high().  By getting the MAX of the region's low address
 880   // (or the previous region's high address) and high(), we can tell if it
 881   // is an intra or inter region growth.
 882   size_t lower_needs = 0;
 883   if (aligned_lower_new_high > lower_high()) {
 884     lower_needs = pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
 885   }
 886   size_t middle_needs = 0;
 887   if (aligned_middle_new_high > middle_high()) {
 888     middle_needs = pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
 889   }
 890   size_t upper_needs = 0;
 891   if (aligned_upper_new_high > upper_high()) {
 892     upper_needs = pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
 893   }
 894 
 895   // Check contiguity.
 896   assert(low_boundary() <= lower_high() && lower_high() <= lower_high_boundary(),
 897          "high address must be contained within the region");
 898   assert(lower_high_boundary() <= middle_high() && middle_high() <= middle_high_boundary(),
 899          "high address must be contained within the region");
 900   assert(middle_high_boundary() <= upper_high() && upper_high() <= upper_high_boundary(),
 901          "high address must be contained within the region");
 902 
 903   // Commit regions
 904   if (lower_needs > 0) {
 905     assert(lower_high() + lower_needs <= lower_high_boundary(), "must not expand beyond region");
 906     if (!commit_expanded(lower_high(), lower_needs, _lower_alignment, pre_touch, _executable)) {
 907       return false;
 908     }
 909     _lower_high += lower_needs;
 910   }
 911 
 912   if (middle_needs > 0) {
 913     assert(middle_high() + middle_needs <= middle_high_boundary(), "must not expand beyond region");
 914     if (!commit_expanded(middle_high(), middle_needs, _middle_alignment, pre_touch, _executable)) {
 915       return false;
 916     }
 917     _middle_high += middle_needs;
 918   }
 919 
 920   if (upper_needs > 0) {
 921     assert(upper_high() + upper_needs <= upper_high_boundary(), "must not expand beyond region");
 922     if (!commit_expanded(upper_high(), upper_needs, _upper_alignment, pre_touch, _executable)) {
 923       return false;
 924     }
 925     _upper_high += upper_needs;
 926   }
 927 
 928   _high += bytes;
 929   return true;
 930 }
 931 
 932 // A page is uncommitted if the contents of the entire page are deemed unusable.
 933 // Continue to decrement the high() pointer until it reaches a page boundary,
 934 // at which point that particular page can be uncommitted.
 935 void VirtualSpace::shrink_by(size_t size) {
 936   if (committed_size() < size)
 937     fatal("Cannot shrink virtual space to negative size");
 938 
 939   if (special()) {
 940     // don't uncommit if the entire space is pinned in memory
 941     _high -= size;
 942     return;
 943   }
 944 
 945   char* unaligned_new_high = high() - size;
 946   assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");
 947 
 948   // Calculate new unaligned address
 949   char* unaligned_upper_new_high =
 950     MAX2(unaligned_new_high, middle_high_boundary());
 951   char* unaligned_middle_new_high =
 952     MAX2(unaligned_new_high, lower_high_boundary());
 953   char* unaligned_lower_new_high =
 954     MAX2(unaligned_new_high, low_boundary());
 955 
 956   // Align address to region's alignment
 957   char* aligned_upper_new_high =  align_up(unaligned_upper_new_high, upper_alignment());
 958   char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
 959   char* aligned_lower_new_high =  align_up(unaligned_lower_new_high, lower_alignment());
 960 
 961   // Determine which regions need to shrink
 962   size_t upper_needs = 0;
 963   if (aligned_upper_new_high < upper_high()) {
 964     upper_needs =
 965       pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
 966   }
 967   size_t middle_needs = 0;
 968   if (aligned_middle_new_high < middle_high()) {
 969     middle_needs =
 970       pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
 971   }
 972   size_t lower_needs = 0;
 973   if (aligned_lower_new_high < lower_high()) {
 974     lower_needs =
 975       pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
 976   }
 977 
 978   // Check contiguity.
 979   assert(middle_high_boundary() <= upper_high() &&
 980          upper_high() <= upper_high_boundary(),
 981          "high address must be contained within the region");
 982   assert(lower_high_boundary() <= middle_high() &&
 983          middle_high() <= middle_high_boundary(),
 984          "high address must be contained within the region");
 985   assert(low_boundary() <= lower_high() &&
 986          lower_high() <= lower_high_boundary(),
 987          "high address must be contained within the region");
 988 
 989   // Uncommit
 990   if (upper_needs > 0) {
 991     assert(middle_high_boundary() <= aligned_upper_new_high &&
 992            aligned_upper_new_high + upper_needs <= upper_high_boundary(),
 993            "must not shrink beyond region");
 994     if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
 995       debug_only(warning("os::uncommit_memory failed"));
 996       return;
 997     } else {
 998       _upper_high -= upper_needs;
 999     }
1000   }
1001   if (middle_needs > 0) {
1002     assert(lower_high_boundary() <= aligned_middle_new_high &&
1003            aligned_middle_new_high + middle_needs <= middle_high_boundary(),
1004            "must not shrink beyond region");
1005     if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
1006       debug_only(warning("os::uncommit_memory failed"));
1007       return;
1008     } else {
1009       _middle_high -= middle_needs;
1010     }
1011   }
1012   if (lower_needs > 0) {
1013     assert(low_boundary() <= aligned_lower_new_high &&
1014            aligned_lower_new_high + lower_needs <= lower_high_boundary(),
1015            "must not shrink beyond region");
1016     if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
1017       debug_only(warning("os::uncommit_memory failed"));
1018       return;
1019     } else {
1020       _lower_high -= lower_needs;
1021     }
1022   }
1023 
1024   _high -= size;
1025 }
1026 
1027 #ifndef PRODUCT
1028 void VirtualSpace::check_for_contiguity() {
1029   // Check contiguity.
1030   assert(low_boundary() <= lower_high() &&
1031          lower_high() <= lower_high_boundary(),
1032          "high address must be contained within the region");
1033   assert(lower_high_boundary() <= middle_high() &&
1034          middle_high() <= middle_high_boundary(),
1035          "high address must be contained within the region");
1036   assert(middle_high_boundary() <= upper_high() &&
1037          upper_high() <= upper_high_boundary(),
1038          "high address must be contained within the region");
1039   assert(low() >= low_boundary(), "low");
1040   assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
1041   assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
1042   assert(high() <= upper_high(), "upper high");
1043 }
1044 
1045 void VirtualSpace::print_on(outputStream* out) {
1046   out->print   ("Virtual space:");
1047   if (special()) out->print(" (pinned in memory)");
1048   out->cr();
1049   out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
1050   out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
1051   out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low()), p2i(high()));
1052   out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low_boundary()), p2i(high_boundary()));
1053 }
1054 
1055 void VirtualSpace::print() {
1056   print_on(tty);
1057 }
1058 
1059 /////////////// Unit tests ///////////////
1060 
1061 #ifndef PRODUCT
1062 
1063 #define test_log(...) \
1064   do {\
1065     if (VerboseInternalVMTests) { \
1066       tty->print_cr(__VA_ARGS__); \
1067       tty->flush(); \
1068     }\
1069   } while (false)
1070 
1071 class TestReservedSpace : AllStatic {
1072  public:
1073   static void small_page_write(void* addr, size_t size) {
1074     size_t page_size = os::vm_page_size();
1075 
1076     char* end = (char*)addr + size;
1077     for (char* p = (char*)addr; p < end; p += page_size) {
1078       *p = 1;
1079     }
1080   }
1081 
1082   static void release_memory_for_test(ReservedSpace rs) {
1083     if (rs.special()) {
1084       guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
1085     } else {
1086       guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
1087     }
1088   }
1089 
1090   static void test_reserved_space1(size_t size, size_t alignment) {
1091     test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
1092 
1093     assert(is_aligned(size, alignment), "Incorrect input parameters");
1094 
1095     ReservedSpace rs(size,          // size
1096                      alignment,     // alignment
1097                      UseLargePages, // large
1098                      (char *)NULL); // requested_address
1099 
1100     test_log(" rs.special() == %d", rs.special());
1101 
1102     assert(rs.base() != NULL, "Must be");
1103     assert(rs.size() == size, "Must be");
1104 
1105     assert(is_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
1106     assert(is_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
1107 
1108     if (rs.special()) {
1109       small_page_write(rs.base(), size);
1110     }
1111 
1112     release_memory_for_test(rs);
1113   }
1114 
1115   static void test_reserved_space2(size_t size) {
1116     test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
1117 
1118     assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1119 
1120     ReservedSpace rs(size);
1121 
1122     test_log(" rs.special() == %d", rs.special());
1123 
1124     assert(rs.base() != NULL, "Must be");
1125     assert(rs.size() == size, "Must be");
1126 
1127     if (rs.special()) {
1128       small_page_write(rs.base(), size);
1129     }
1130 
1131     release_memory_for_test(rs);
1132   }
1133 
1134   static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
1135     test_log("test_reserved_space3(%p, %p, %d)",
1136         (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);
1137 
1138     if (size < alignment) {
1139       // Tests might set -XX:LargePageSizeInBytes=<small pages> and cause unexpected input arguments for this test.
1140       assert((size_t)os::vm_page_size() == os::large_page_size(), "Test needs further refinement");
1141       return;
1142     }
1143 
1144     assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1145     assert(is_aligned(size, alignment), "Must be at least aligned against alignment");
1146 
1147     bool large = maybe_large && UseLargePages && size >= os::large_page_size();
1148 
1149     ReservedSpace rs(size, alignment, large, false);
1150 
1151     test_log(" rs.special() == %d", rs.special());
1152 
1153     assert(rs.base() != NULL, "Must be");
1154     assert(rs.size() == size, "Must be");
1155 
1156     if (rs.special()) {
1157       small_page_write(rs.base(), size);
1158     }
1159 
1160     release_memory_for_test(rs);
1161   }
1162 
1163 
1164   static void test_reserved_space1() {
1165     size_t size = 2 * 1024 * 1024;
1166     size_t ag   = os::vm_allocation_granularity();
1167 
1168     test_reserved_space1(size,      ag);
1169     test_reserved_space1(size * 2,  ag);
1170     test_reserved_space1(size * 10, ag);
1171   }
1172 
1173   static void test_reserved_space2() {
1174     size_t size = 2 * 1024 * 1024;
1175     size_t ag = os::vm_allocation_granularity();
1176 
1177     test_reserved_space2(size * 1);
1178     test_reserved_space2(size * 2);
1179     test_reserved_space2(size * 10);
1180     test_reserved_space2(ag);
1181     test_reserved_space2(size - ag);
1182     test_reserved_space2(size);
1183     test_reserved_space2(size + ag);
1184     test_reserved_space2(size * 2);
1185     test_reserved_space2(size * 2 - ag);
1186     test_reserved_space2(size * 2 + ag);
1187     test_reserved_space2(size * 3);
1188     test_reserved_space2(size * 3 - ag);
1189     test_reserved_space2(size * 3 + ag);
1190     test_reserved_space2(size * 10);
1191     test_reserved_space2(size * 10 + size / 2);
1192   }
1193 
1194   static void test_reserved_space3() {
1195     size_t ag = os::vm_allocation_granularity();
1196 
1197     test_reserved_space3(ag,      ag    , false);
1198     test_reserved_space3(ag * 2,  ag    , false);
1199     test_reserved_space3(ag * 3,  ag    , false);
1200     test_reserved_space3(ag * 2,  ag * 2, false);
1201     test_reserved_space3(ag * 4,  ag * 2, false);
1202     test_reserved_space3(ag * 8,  ag * 2, false);
1203     test_reserved_space3(ag * 4,  ag * 4, false);
1204     test_reserved_space3(ag * 8,  ag * 4, false);
1205     test_reserved_space3(ag * 16, ag * 4, false);
1206 
1207     if (UseLargePages) {
1208       size_t lp = os::large_page_size();
1209 
1210       // Without large pages
1211       test_reserved_space3(lp,     ag * 4, false);
1212       test_reserved_space3(lp * 2, ag * 4, false);
1213       test_reserved_space3(lp * 4, ag * 4, false);
1214       test_reserved_space3(lp,     lp    , false);
1215       test_reserved_space3(lp * 2, lp    , false);
1216       test_reserved_space3(lp * 3, lp    , false);
1217       test_reserved_space3(lp * 2, lp * 2, false);
1218       test_reserved_space3(lp * 4, lp * 2, false);
1219       test_reserved_space3(lp * 8, lp * 2, false);
1220 
1221       // With large pages
1222       test_reserved_space3(lp, ag * 4    , true);
1223       test_reserved_space3(lp * 2, ag * 4, true);
1224       test_reserved_space3(lp * 4, ag * 4, true);
1225       test_reserved_space3(lp, lp        , true);
1226       test_reserved_space3(lp * 2, lp    , true);
1227       test_reserved_space3(lp * 3, lp    , true);
1228       test_reserved_space3(lp * 2, lp * 2, true);
1229       test_reserved_space3(lp * 4, lp * 2, true);
1230       test_reserved_space3(lp * 8, lp * 2, true);
1231     }
1232   }
1233 
1234   static void test_reserved_space() {
1235     test_reserved_space1();
1236     test_reserved_space2();
1237     test_reserved_space3();
1238   }
1239 };
1240 
1241 void TestReservedSpace_test() {
1242   TestReservedSpace::test_reserved_space();
1243 }
1244 
1245 #define assert_equals(actual, expected)  \
1246   assert(actual == expected,             \
1247          "Got " SIZE_FORMAT " expected " \
1248          SIZE_FORMAT, actual, expected);
1249 
1250 #define assert_ge(value1, value2)                  \
1251   assert(value1 >= value2,                         \
1252          "'" #value1 "': " SIZE_FORMAT " '"        \
1253          #value2 "': " SIZE_FORMAT, value1, value2);
1254 
1255 #define assert_lt(value1, value2)                  \
1256   assert(value1 < value2,                          \
1257          "'" #value1 "': " SIZE_FORMAT " '"        \
1258          #value2 "': " SIZE_FORMAT, value1, value2);
1259 
1260 
1261 class TestVirtualSpace : AllStatic {
1262   enum TestLargePages {
1263     Default,
1264     Disable,
1265     Reserve,
1266     Commit
1267   };
1268 
1269   static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
1270     switch(mode) {
1271     default:
1272     case Default:
1273     case Reserve:
1274       return ReservedSpace(reserve_size_aligned);
1275     case Disable:
1276     case Commit:
1277       return ReservedSpace(reserve_size_aligned,
1278                            os::vm_allocation_granularity(),
1279                            /* large */ false, /* exec */ false);
1280     }
1281   }
1282 
1283   static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
1284     switch(mode) {
1285     default:
1286     case Default:
1287     case Reserve:
1288       return vs.initialize(rs, 0);
1289     case Disable:
1290       return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
1291     case Commit:
1292       return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
1293     }
1294   }
1295 
1296  public:
1297   static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
1298                                                         TestLargePages mode = Default) {
1299     size_t granularity = os::vm_allocation_granularity();
1300     size_t reserve_size_aligned = align_up(reserve_size, granularity);
1301 
1302     ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
1303 
1304     assert(reserved.is_reserved(), "Must be");
1305 
1306     VirtualSpace vs;
1307     bool initialized = initialize_virtual_space(vs, reserved, mode);
1308     assert(initialized, "Failed to initialize VirtualSpace");
1309 
1310     vs.expand_by(commit_size, false);
1311 
1312     if (vs.special()) {
1313       assert_equals(vs.actual_committed_size(), reserve_size_aligned);
1314     } else {
1315       assert_ge(vs.actual_committed_size(), commit_size);
1316       // Approximate the commit granularity.
1317       // Make sure that we don't commit using large pages
1318       // if large pages have been disabled for this VirtualSpace.
1319       size_t commit_granularity = (mode == Disable || !UseLargePages) ?
1320                                    os::vm_page_size() : os::large_page_size();
1321       assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
1322     }
1323 
1324     reserved.release();
1325   }
1326 
1327   static void test_virtual_space_actual_committed_space_one_large_page() {
1328     if (!UseLargePages) {
1329       return;
1330     }
1331 
1332     size_t large_page_size = os::large_page_size();
1333 
1334     ReservedSpace reserved(large_page_size, large_page_size, true, false);
1335 
1336     assert(reserved.is_reserved(), "Must be");
1337 
1338     VirtualSpace vs;
1339     bool initialized = vs.initialize(reserved, 0);
1340     assert(initialized, "Failed to initialize VirtualSpace");
1341 
1342     vs.expand_by(large_page_size, false);
1343 
1344     assert_equals(vs.actual_committed_size(), large_page_size);
1345 
1346     reserved.release();
1347   }
1348 
1349   static void test_virtual_space_actual_committed_space() {
1350     test_virtual_space_actual_committed_space(4 * K, 0);
1351     test_virtual_space_actual_committed_space(4 * K, 4 * K);
1352     test_virtual_space_actual_committed_space(8 * K, 0);
1353     test_virtual_space_actual_committed_space(8 * K, 4 * K);
1354     test_virtual_space_actual_committed_space(8 * K, 8 * K);
1355     test_virtual_space_actual_committed_space(12 * K, 0);
1356     test_virtual_space_actual_committed_space(12 * K, 4 * K);
1357     test_virtual_space_actual_committed_space(12 * K, 8 * K);
1358     test_virtual_space_actual_committed_space(12 * K, 12 * K);
1359     test_virtual_space_actual_committed_space(64 * K, 0);
1360     test_virtual_space_actual_committed_space(64 * K, 32 * K);
1361     test_virtual_space_actual_committed_space(64 * K, 64 * K);
1362     test_virtual_space_actual_committed_space(2 * M, 0);
1363     test_virtual_space_actual_committed_space(2 * M, 4 * K);
1364     test_virtual_space_actual_committed_space(2 * M, 64 * K);
1365     test_virtual_space_actual_committed_space(2 * M, 1 * M);
1366     test_virtual_space_actual_committed_space(2 * M, 2 * M);
1367     test_virtual_space_actual_committed_space(10 * M, 0);
1368     test_virtual_space_actual_committed_space(10 * M, 4 * K);
1369     test_virtual_space_actual_committed_space(10 * M, 8 * K);
1370     test_virtual_space_actual_committed_space(10 * M, 1 * M);
1371     test_virtual_space_actual_committed_space(10 * M, 2 * M);
1372     test_virtual_space_actual_committed_space(10 * M, 5 * M);
1373     test_virtual_space_actual_committed_space(10 * M, 10 * M);
1374   }
1375 
1376   static void test_virtual_space_disable_large_pages() {
1377     if (!UseLargePages) {
1378       return;
1379     }
1380     // These test cases verify that, when we force a VirtualSpace to disable large pages, committing falls back to small-page granularity.
1381     test_virtual_space_actual_committed_space(10 * M, 0, Disable);
1382     test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
1383     test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
1384     test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
1385     test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
1386     test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
1387     test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);
1388 
1389     test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
1390     test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
1391     test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
1392     test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
1393     test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
1394     test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
1395     test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);
1396 
1397     test_virtual_space_actual_committed_space(10 * M, 0, Commit);
1398     test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
1399     test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
1400     test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
1401     test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
1402     test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
1403     test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
1404   }
1405 
1406   static void test_virtual_space() {
1407     test_virtual_space_actual_committed_space();
1408     test_virtual_space_actual_committed_space_one_large_page();
1409     test_virtual_space_disable_large_pages();
1410   }
1411 };
1412 
1413 void TestVirtualSpace_test() {
1414   TestVirtualSpace::test_virtual_space();
1415 }
1416 
1417 #endif // PRODUCT
1418 
1419 #endif // PRODUCT