1 /*
   2  * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "code/codeCacheExtensions.hpp"
  27 #include "logging/log.hpp"
  28 #include "memory/resourceArea.hpp"
  29 #include "memory/virtualspace.hpp"
  30 #include "oops/markOop.hpp"
  31 #include "oops/oop.inline.hpp"
  32 #include "services/memTracker.hpp"
  33 
  34 // ReservedSpace
  35 
  36 // Dummy constructor
  37 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
  38     _alignment(0), _special(false), _executable(false), _backing_fd(-1) {
  39 }
  40 
  41 ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _backing_fd(-1) {
  42   bool has_preferred_page_size = preferred_page_size != 0;
  43   // Want to use large pages where possible and pad with small pages.
  44   size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
  45   bool large_pages = page_size != (size_t)os::vm_page_size();
  46   size_t alignment;
  47   if (large_pages && has_preferred_page_size) {
  48     alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
  49     // ReservedSpace initialization requires size to be aligned to the given
  50     // alignment. Align the size up.
  51     size = align_size_up(size, alignment);
  52   } else {
  53     // Don't force the alignment to be large page aligned,
  54     // since that will waste memory.
  55     alignment = os::vm_allocation_granularity();
  56   }
  57   initialize(size, alignment, large_pages, NULL, false);
  58 }
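
// Illustrative use of this constructor (a minimal usage sketch, not a call site
// in this file): reserve 64M and prefer the configured large page size; the
// reservation is padded with small pages where large pages cannot be used.
//
//   ReservedSpace rs(64 * M, os::large_page_size());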
  59 
  60 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  61                              bool large,
  62                              char* requested_address) : _backing_fd(-1) {
  63   initialize(size, alignment, large, requested_address, false);
  64 }
  65 
  66 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  67                              bool large,
  68                              bool executable) : _backing_fd(-1) {
  69   initialize(size, alignment, large, NULL, executable);
  70 }
  71 
  72 // Helper: returns true if memory was not reserved at the requested address; any reservation is released.
  73 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
  74                                            const size_t size, bool special)
  75 {
  76   if (base == requested_address || requested_address == NULL)
  77     return false; // did not fail
  78 
  79   if (base != NULL) {
  80     // A different reserve address may be acceptable in other cases
  81     // but for compressed oops the heap should be at the requested address.
  82     assert(UseCompressedOops, "currently requested address used only for compressed oops");
  83     log_debug(gc, heap, coops)("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address));
  84     // OS ignored the requested address. Try a different address.
  85     if (special) {
  86       if (!os::release_memory_special(base, size)) {
  87         fatal("os::release_memory_special failed");
  88       }
  89     } else {
  90       if (!os::release_memory(base, size)) {
  91         fatal("os::release_memory failed");
  92       }
  93     }
  94   }
  95   return true;
  96 }
  97 
  98 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
  99                                char* requested_address,
 100                                bool executable) {
 101   const size_t granularity = os::vm_allocation_granularity();
 102   assert((size & (granularity - 1)) == 0,
 103          "size not aligned to os::vm_allocation_granularity()");
 104   assert((alignment & (granularity - 1)) == 0,
 105          "alignment not aligned to os::vm_allocation_granularity()");
 106   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 107          "not a power of 2");
 108 
 109   alignment = MAX2(alignment, (size_t)os::vm_page_size());
 110 
 111   _base = NULL;
 112   _size = 0;
 113   _special = false;
 114   _executable = executable;
 115   _alignment = 0;
 116   _noaccess_prefix = 0;
 117   if (size == 0) {
 118     return;
 119   }
 120 
 121   // If the OS doesn't support demand paging for large page memory, we need
 122   // to use reserve_memory_special() to reserve and pin the entire region.
 123   // If there is a backing file directory for this VirtualSpace then whether
 124   // large pages are allocated is up to the filesystem the directory resides in.
 125   // So we ignore the UseLargePages flag in this case.
 126   bool special = (_backing_fd == -1) && (large && !os::can_commit_large_page_memory());
 127   char* base = NULL;
 128 
 129   if (special) {
 130 
 131     base = os::reserve_memory_special(size, alignment, requested_address, executable);
 132 
 133     if (base != NULL) {
 134       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
 135         // OS ignored the requested address. Try a different address.
 136         return;
 137       }
 138       // Check alignment constraints.
 139       assert((uintptr_t) base % alignment == 0,
 140              "Large pages returned a non-aligned address, base: "
 141              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 142              p2i(base), alignment);
 143       _special = true;
 144     } else {
 145       // Failed; try to reserve regular memory below.
 146       if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 147                             !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 148         log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 149       }
 150     }
 151   }
 152 
 153   if (base == NULL) {
 154     // Optimistically assume that the OS returns an aligned base pointer.
 155     // When reserving a large address range, most OSes seem to align to at
 156     // least 64K.
 157 
 158     // If the memory was requested at a particular address, use
 159     // os::attempt_reserve_memory_at() to avoid mapping over something
 160     // important.  If available space is not detected, return NULL.
 161 
 162     if (requested_address != 0) {
 163       base = os::attempt_reserve_memory_at(size, requested_address, _backing_fd);
 164       if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
 165         // OS ignored the requested address. Try a different address.
 166         base = NULL;
 167       }
 168     } else {
 169       base = os::reserve_memory(size, NULL, alignment, _backing_fd);
 170     }
 171 
 172     if (base == NULL) return;
 173 
 174     // Check alignment constraints
 175     if ((((size_t)base) & (alignment - 1)) != 0) {
 176       // Base not aligned, retry
 177       if (_backing_fd != -1) {
 178         if (!os::unmap_memory(base, size)) fatal("os::unmap_memory failed");
 179       } else {
 180         if (!os::release_memory(base, size)) fatal("os::release_memory failed");
 181       }
 182       // Make sure that size is aligned
 183       size = align_size_up(size, alignment);
 184       base = os::reserve_memory_aligned(size, alignment, _backing_fd);
 185 
 186       if (requested_address != 0 &&
 187           failed_to_reserve_as_requested(base, requested_address, size, false)) {
 188         // As a result of the alignment constraints, the allocated base differs
 189         // from the requested address. Return to the caller, who can
 190         // take remedial action (like trying again without a requested address).
 191         assert(_base == NULL, "should be");
 192         return;
 193       }
 194     }
 195   }
 196   // Done
 197   _base = base;
 198   _size = size;
 199   _alignment = alignment;
 200   // If the heap is reserved with a backing file, the entire space has already been committed, so set the _special flag to true.
 201   if (_backing_fd != -1) {
 202     _special = true;
 203   }
 204 }
 205 
 206 
 207 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
 208                              bool special, bool executable) {
 209   assert((size % os::vm_allocation_granularity()) == 0,
 210          "size not allocation aligned");
 211   _base = base;
 212   _size = size;
 213   _alignment = alignment;
 214   _noaccess_prefix = 0;
 215   _special = special;
 216   _executable = executable;
 217 }
 218 
 219 
 220 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
 221                                         bool split, bool realloc) {
 222   assert(partition_size <= size(), "partition failed");
 223   if (split) {
 224     os::split_reserved_memory(base(), size(), partition_size, realloc);
 225   }
 226   ReservedSpace result(base(), partition_size, alignment, special(),
 227                        executable());
 228   return result;
 229 }
 230 
 231 
 232 ReservedSpace
 233 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
 234   assert(partition_size <= size(), "partition failed");
 235   ReservedSpace result(base() + partition_size, size() - partition_size,
 236                        alignment, special(), executable());
 237   return result;
 238 }
 239 
 240 
 241 size_t ReservedSpace::page_align_size_up(size_t size) {
 242   return align_size_up(size, os::vm_page_size());
 243 }
 244 
 245 
 246 size_t ReservedSpace::page_align_size_down(size_t size) {
 247   return align_size_down(size, os::vm_page_size());
 248 }
 249 
 250 
 251 size_t ReservedSpace::allocation_align_size_up(size_t size) {
 252   return align_size_up(size, os::vm_allocation_granularity());
 253 }
 254 
 255 
 256 size_t ReservedSpace::allocation_align_size_down(size_t size) {
 257   return align_size_down(size, os::vm_allocation_granularity());
 258 }
 259 
 260 
 261 void ReservedSpace::release() {
 262   if (is_reserved()) {
 263     char *real_base = _base - _noaccess_prefix;
 264     const size_t real_size = _size + _noaccess_prefix;
 265     if (special()) {
 266       os::release_memory_special(real_base, real_size);
 267     } else {
 268       os::release_memory(real_base, real_size);
 269     }
 270     _base = NULL;
 271     _size = 0;
 272     _noaccess_prefix = 0;
 273     _alignment = 0;
 274     _special = false;
 275     _executable = false;
 276   }
 277 }
 278 
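// The noaccess prefix must cover at least one whole page (so that it can be
// protected) and must also preserve the heap alignment, hence the least common
// multiple of the two values below.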
 279 static size_t noaccess_prefix_size(size_t alignment) {
 280   return lcm(os::vm_page_size(), alignment);
 281 }
 282 
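// For heap-based compressed oops the narrow oop base points at the start of
// this prefix, so decoding a narrow null yields an address inside the
// protected pages and faults. That lets generated code use implicit null
// checks; when the pages cannot be protected, implicit null checks are
// disabled instead (see below).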
 283 void ReservedHeapSpace::establish_noaccess_prefix() {
 284   assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
 285   _noaccess_prefix = noaccess_prefix_size(_alignment);
 286 
 287   if (base() && base() + _size > (char *)OopEncodingHeapMax) {
 288     if (true
 289         WIN64_ONLY(&& !UseLargePages)
 290         AIX_ONLY(&& os::vm_page_size() != 64*K)) {
 291       // Protect memory at the base of the allocated region.
 292       // If special, the page was already committed (only matters on Windows).
 293       if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
 294         fatal("cannot protect protection page");
 295       }
 296       log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
 297                                  PTR_FORMAT " / " INTX_FORMAT " bytes",
 298                                  p2i(_base),
 299                                  _noaccess_prefix);
 300       assert(Universe::narrow_oop_use_implicit_null_checks() == true, "not initialized?");
 301     } else {
 302       Universe::set_narrow_oop_use_implicit_null_checks(false);
 303     }
 304   }
 305 
 306   _base += _noaccess_prefix;
 307   _size -= _noaccess_prefix;
 308   assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
 309 }
 310 
 311 // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
 312 // Does not check whether the reserved memory actually is at requested_address, as the memory returned
 313 // might still fulfill the wishes of the caller.
 314 // Ensures the memory is aligned to 'alignment'.
 315 // NOTE: If ReservedHeapSpace already points to some reserved memory, this is freed first.
 316 void ReservedHeapSpace::try_reserve_heap(size_t size,
 317                                          size_t alignment,
 318                                          bool large,
 319                                          char* requested_address) {
 320   if (_base != NULL) {
 321     // We tried before, but we didn't like the address delivered.
 322     release();
 323   }
 324 
 325   // If the OS doesn't support demand paging for large page memory, we need
 326   // to use reserve_memory_special() to reserve and pin the entire region.
 327   // If there is a backing file directory for this VirtualSpace then whether
 328   // large pages are allocated is up to the filesystem the directory resides in.
 329   // So we ignore the UseLargePages flag in this case.
 330   bool special = (_backing_fd == -1) && (large && !os::can_commit_large_page_memory());
 331   char* base = NULL;
 332 
 333   log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
 334                              " heap of size " SIZE_FORMAT_HEX,
 335                              p2i(requested_address),
 336                              size);
 337 
 338   if (special) {
 339     base = os::reserve_memory_special(size, alignment, requested_address, false);
 340 
 341     if (base != NULL) {
 342       // Check alignment constraints.
 343       assert((uintptr_t) base % alignment == 0,
 344              "Large pages returned a non-aligned address, base: "
 345              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 346              p2i(base), alignment);
 347       _special = true;
 348     }
 349   }
 350 
 351   if (base == NULL) {
 352     // Failed; try to reserve regular memory below
 353     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 354                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 355       log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 356     }
 357 
 358     // Optimistically assume that the OS returns an aligned base pointer.
 359     // When reserving a large address range, most OSes seem to align to at
 360     // least 64K.
 361 
 362     // If the memory was requested at a particular address, use
 363     // os::attempt_reserve_memory_at() to avoid mapping over something
 364     // important.  If available space is not detected, return NULL.
 365 
 366     if (requested_address != 0) {
 367       base = os::attempt_reserve_memory_at(size, requested_address, _backing_fd);
 368     } else {
 369       base = os::reserve_memory(size, NULL, alignment, _backing_fd);
 370     }
 371   }
 372   if (base == NULL) { return; }
 373 
 374   // Done
 375   _base = base;
 376   _size = size;
 377   _alignment = alignment;
 378   // If the heap is reserved with a backing file, the entire space has already been committed, so set the _special flag to true.
 379   if (_backing_fd != -1) {
 380     _special = true;
 381   }
 382 
 383   // Check alignment constraints
 384   if ((((size_t)base) & (alignment - 1)) != 0) {
 385     // Base not aligned, retry.
 386     release();
 387   }
 388 }
 389 
 390 void ReservedHeapSpace::try_reserve_range(char *highest_start,
 391                                           char *lowest_start,
 392                                           size_t attach_point_alignment,
 393                                           char *aligned_heap_base_min_address,
 394                                           char *upper_bound,
 395                                           size_t size,
 396                                           size_t alignment,
 397                                           bool large) {
 398   const size_t attach_range = highest_start - lowest_start;
 399   // Cap the number of attempts at what is actually possible.
 400   // At least one attempt is possible even for a zero-sized attach range.
 401   const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
 402   const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);
 403 
 404   const size_t stepsize = (attach_range == 0) ? // Only one try.
 405     (size_t) highest_start : align_size_up(attach_range / num_attempts_to_try, attach_point_alignment);
 406 
 407   // Try attach points from top to bottom.
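  // Example (illustrative numbers): with a 1G attach range, HeapSearchSteps of 3
  // and a 64K attach point alignment, the loop below probes three attach points
  // roughly 341M apart, starting at highest_start and working downwards.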
 408   char* attach_point = highest_start;
 409   while (attach_point >= lowest_start  &&
 410          attach_point <= highest_start &&  // Avoid wrap around.
 411          ((_base == NULL) ||
 412           (_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
 413     try_reserve_heap(size, alignment, large, attach_point);
 414     attach_point -= stepsize;
 415   }
 416 }
 417 
 418 #define SIZE_64K  ((uint64_t) UCONST64(      0x10000))
 419 #define SIZE_256M ((uint64_t) UCONST64(   0x10000000))
 420 #define SIZE_32G  ((uint64_t) UCONST64(  0x800000000))
 421 
 422 // Helper for heap allocation. Returns an array with addresses
 423 // (OS-specific) which are suited for disjoint base mode. Array is
 424 // NULL terminated.
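//
// In disjoint base mode the heap base is aligned to OopEncodingHeapMax, so the
// non-zero bits of the base and of the shifted narrow oop do not overlap; on
// some platforms decoding can then merge the base in with an OR (or a bit-field
// insert) instead of a full add. The candidates below are therefore multiples
// of 32G, and further up multiples of 32G * 64K.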
 425 static char** get_attach_addresses_for_disjoint_mode() {
 426   static uint64_t addresses[] = {
 427      2 * SIZE_32G,
 428      3 * SIZE_32G,
 429      4 * SIZE_32G,
 430      8 * SIZE_32G,
 431     10 * SIZE_32G,
 432      1 * SIZE_64K * SIZE_32G,
 433      2 * SIZE_64K * SIZE_32G,
 434      3 * SIZE_64K * SIZE_32G,
 435      4 * SIZE_64K * SIZE_32G,
 436     16 * SIZE_64K * SIZE_32G,
 437     32 * SIZE_64K * SIZE_32G,
 438     34 * SIZE_64K * SIZE_32G,
 439     0
 440   };
 441 
 442   // Skip addresses below OopEncodingHeapMax (not usable for disjoint base mode)
 443   // or below HeapBaseMinAddress. This assumes the array is sorted.
 444   uint i = 0;
 445   while (addresses[i] != 0 &&
 446          (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
 447     i++;
 448   }
 449   uint start = i;
 450 
 451   // Avoid more steps than requested.
 452   i = 0;
 453   while (addresses[start+i] != 0) {
 454     if (i == HeapSearchSteps) {
 455       addresses[start+i] = 0;
 456       break;
 457     }
 458     i++;
 459   }
 460 
 461   return (char**) &addresses[start];
 462 }
 463 
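// Reserve the Java heap so that compressed oops can be decoded as cheaply as
// possible. The placement schemes below are tried in order of decreasing
// benefit: unscaled (base == 0, shift == 0), zero-based (base == 0), disjoint
// base, and finally an arbitrary base with a noaccess prefix.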
 464 void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, bool large) {
 465   guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
 466             "cannot allocate compressed oop heap for this size");
 467   guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
 468   assert(HeapBaseMinAddress > 0, "sanity");
 469 
 470   const size_t granularity = os::vm_allocation_granularity();
 471   assert((size & (granularity - 1)) == 0,
 472          "size not aligned to os::vm_allocation_granularity()");
 473   assert((alignment & (granularity - 1)) == 0,
 474          "alignment not aligned to os::vm_allocation_granularity()");
 475   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 476          "not a power of 2");
 477 
 478   // The necessary attach point alignment for generated wish addresses.
 479   // This is needed to increase the chance of attaching for mmap and shmat.
 480   const size_t os_attach_point_alignment =
 481     AIX_ONLY(SIZE_256M)  // Known shm boundary alignment.
 482     NOT_AIX(os::vm_allocation_granularity());
 483   const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
 484 
 485   char *aligned_heap_base_min_address = (char *)align_ptr_up((void *)HeapBaseMinAddress, alignment);
 486   size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
 487     noaccess_prefix_size(alignment) : 0;
 488 
 489   // Attempt to allocate at the user-given address.
 490   if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
 491     try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_heap_base_min_address);
 492     if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
 493       release();
 494     }
 495   }
 496 
 497   // Keep heap at HeapBaseMinAddress.
 498   if (_base == NULL) {
 499 
 500     // Try to allocate the heap at addresses that allow efficient oop compression.
 501     // Different schemes are tried, in order of decreasing optimization potential.
 502     //
 503     // For this, try_reserve_heap() is called with the desired heap base addresses.
 504     // A call into the os layer to allocate at a given address can return memory
 505     // at a different address than requested.  Still, this might be memory at a useful
 506     // address.  try_reserve_heap() keeps such an allocation, since only here, in the
 507     // caller, are the criteria for a good heap placement checked.
 508 
 509     // Attempt to allocate so that we can run without base and scale (32-bit unscaled compressed oops).
 510     // Give it several tries from top of range to bottom.
 511     if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {
 512 
 513       // Calculate the address range within which we try to attach (range of possible start addresses).
 514       char* const highest_start = (char *)align_ptr_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
 515       char* const lowest_start  = (char *)align_ptr_up(aligned_heap_base_min_address, attach_point_alignment);
 516       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 517                         aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
 518     }
 519 
 520     // zerobased: Attempt to allocate in the lower 32G.
 521     // But leave room for the compressed class space, which is allocated above
 522     // the heap.
 523     char *zerobased_max = (char *)OopEncodingHeapMax;
 524     const size_t class_space = align_size_up(CompressedClassSpaceSize, alignment);
 525     // For small heaps, save some space for compressed class pointer
 526     // space so it can be decoded with no base.
 527     if (UseCompressedClassPointers && !UseSharedSpaces &&
 528         OopEncodingHeapMax <= KlassEncodingMetaspaceMax &&
 529         (uint64_t)(aligned_heap_base_min_address + size + class_space) <= KlassEncodingMetaspaceMax) {
 530       zerobased_max = (char *)OopEncodingHeapMax - class_space;
 531     }
 532 
 533     // Give it several tries from top of range to bottom.
 534     if (aligned_heap_base_min_address + size <= zerobased_max &&    // Zerobased theoretically possible.
 535         ((_base == NULL) ||                        // No previous try succeeded.
 536          (_base + size > zerobased_max))) {        // Unscaled delivered an arbitrary address.
 537 
 538       // Calculate the address range within which we try to attach (range of possible start addresses).
 539       char *const highest_start = (char *)align_ptr_down(zerobased_max - size, attach_point_alignment);
 540       // Be careful: size is not guaranteed to be less than UnscaledOopHeapMax,
 541       // so the subtraction below may wrap around.
 542       char *lowest_start = aligned_heap_base_min_address;
 543       uint64_t unscaled_end = UnscaledOopHeapMax - size;
 544       if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
 545         lowest_start = MAX2(lowest_start, (char*)unscaled_end);
 546       }
 547       lowest_start  = (char *)align_ptr_up(lowest_start, attach_point_alignment);
 548       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 549                         aligned_heap_base_min_address, zerobased_max, size, alignment, large);
 550     }
 551 
 552     // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
 553     // implement null checks.
 554     noaccess_prefix = noaccess_prefix_size(alignment);
 555 
 556     // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjoint base mode.
 557     char** addresses = get_attach_addresses_for_disjoint_mode();
 558     int i = 0;
 559     while (addresses[i] &&                                 // End of array not yet reached.
 560            ((_base == NULL) ||                             // No previous try succeeded.
 561             (_base + size >  (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
 562              !Universe::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
 563       char* const attach_point = addresses[i];
 564       assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
 565       try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
 566       i++;
 567     }
 568 
 569     // Last, desperate try without any placement.
 570     if (_base == NULL) {
 571       log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix);
 572       initialize(size + noaccess_prefix, alignment, large, NULL, false);
 573     }
 574   }
 575 }
 576 
 577 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* backing_fs_for_heap) : ReservedSpace() {
 578 
 579   if (size == 0) {
 580     return;
 581   }
 582 
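  // If a backing filesystem directory was given, the Java heap is mapped from a
  // file created in that directory (e.g. a DAX/NVDIMM-backed mount) rather than
  // from anonymous memory.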
 583   if (backing_fs_for_heap != NULL) {
 584     _backing_fd = os::create_file_for_heap(backing_fs_for_heap, size);
 585     if (_backing_fd == -1) {
 586       vm_exit_during_initialization(
 587         err_msg("Could not create file for Heap at location %s", backing_fs_for_heap));
 588     }
 589   }
 590 
 591   // Heap size should be aligned to alignment, too.
 592   guarantee(is_size_aligned(size, alignment), "set by caller");
 593 
 594   if (UseCompressedOops) {
 595     initialize_compressed_heap(size, alignment, large);
 596     if (_size > size) {
 597       // We allocated heap with noaccess prefix.
 598       // It can happen that we get a zerobased/unscaled heap with a noaccess prefix,
 599       // if we had to try at an arbitrary address.
 600       establish_noaccess_prefix();
 601     }
 602   } else {
 603     initialize(size, alignment, large, NULL, false);
 604   }
 605 
 606   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
 607          "area must be distinguishable from marks for mark-sweep");
 608   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
 609          "area must be distinguishable from marks for mark-sweep");
 610 
 611   if (base() > 0) {
 612     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
 613   }
 614 
 615   if (backing_fs_for_heap != NULL) {
 616     os::close(_backing_fd);
 617   }
 618 }
 619 
 620 // Reserve space for code segment.  Same as the Java heap, except we mark this as
 621 // executable.
 622 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
 623                                      size_t rs_align,
 624                                      bool large) :
 625   ReservedSpace(r_size, rs_align, large, /*executable*/ CodeCacheExtensions::support_dynamic_code()) {
 626   MemTracker::record_virtual_memory_type((address)base(), mtCode);
 627 }
 628 
 629 // VirtualSpace
 630 
 631 VirtualSpace::VirtualSpace() {
 632   _low_boundary           = NULL;
 633   _high_boundary          = NULL;
 634   _low                    = NULL;
 635   _high                   = NULL;
 636   _lower_high             = NULL;
 637   _middle_high            = NULL;
 638   _upper_high             = NULL;
 639   _lower_high_boundary    = NULL;
 640   _middle_high_boundary   = NULL;
 641   _upper_high_boundary    = NULL;
 642   _lower_alignment        = 0;
 643   _middle_alignment       = 0;
 644   _upper_alignment        = 0;
 645   _special                = false;
 646   _executable             = false;
 647 }
 648 
 649 
 650 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
 651   const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
 652   return initialize_with_granularity(rs, committed_size, max_commit_granularity);
 653 }
 654 
 655 bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
 656   if (!rs.is_reserved()) return false;  // Allocation failed.
 657   assert(_low_boundary == NULL, "VirtualSpace already initialized");
 658   assert(max_commit_granularity > 0, "Granularity must be non-zero.");
 659 
 660   _low_boundary  = rs.base();
 661   _high_boundary = low_boundary() + rs.size();
 662 
 663   _low = low_boundary();
 664   _high = low();
 665 
 666   _special = rs.special();
 667   _executable = rs.executable();
 668 
 669   // When a VirtualSpace begins life at a large size, make all future expansion
 670   // and shrinking occur aligned to a granularity of large pages.  This avoids
 671   // fragmentation of physical addresses that inhibits the use of large pages
 672   // by the OS virtual memory system.  Empirically,  we see that with a 4MB
 673   // page size, the only spaces that get handled this way are codecache and
 674   // the heap itself, both of which provide a substantial performance
 675   // boost in many benchmarks when covered by large pages.
 676   //
 677   // No attempt is made to force large page alignment at the very top and
 678   // bottom of the space if they are not aligned so already.
 679   _lower_alignment  = os::vm_page_size();
 680   _middle_alignment = max_commit_granularity;
 681   _upper_alignment  = os::vm_page_size();
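
  // Resulting layout of the three commit regions (addresses grow to the right):
  //
  //   low_boundary()                                              high_boundary()
  //   |---- lower ----|---------------- middle ----------------|---- upper ----|
  //                   ^                                         ^
  //          lower_high_boundary()                    middle_high_boundary()
  //
  // The lower and upper regions are committed with small pages; the middle
  // region, whose ends are aligned to the commit granularity, may use large pages.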
 682 
 683   // End of each region
 684   _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
 685   _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
 686   _upper_high_boundary = high_boundary();
 687 
 688   // High address of each region
 689   _lower_high = low_boundary();
 690   _middle_high = lower_high_boundary();
 691   _upper_high = middle_high_boundary();
 692 
 693   // commit to initial size
 694   if (committed_size > 0) {
 695     if (!expand_by(committed_size)) {
 696       return false;
 697     }
 698   }
 699   return true;
 700 }
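
// Illustrative use of the ReservedSpace/VirtualSpace pair (a minimal sketch;
// real call sites, e.g. the code heap or the card table, add their own policy,
// and the sizes below are placeholders):
//
//   ReservedSpace rs(reserved_bytes, os::vm_allocation_granularity(),
//                    /*large*/ false, /*executable*/ false);
//   VirtualSpace vs;
//   if (vs.initialize(rs, initial_committed_bytes)) {
//     vs.expand_by(extra_bytes, /*pre_touch*/ false);  // commit more on demand
//     vs.shrink_by(unused_bytes);                      // uncommit again
//   }
//   rs.release();  // VirtualSpace::release() does not unreserve the memory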
 701 
 702 
 703 VirtualSpace::~VirtualSpace() {
 704   release();
 705 }
 706 
 707 
 708 void VirtualSpace::release() {
 709   // This does not release the underlying reserved memory.
 710   // The caller must release that via the ReservedSpace, i.e. rs.release().
 711   _low_boundary           = NULL;
 712   _high_boundary          = NULL;
 713   _low                    = NULL;
 714   _high                   = NULL;
 715   _lower_high             = NULL;
 716   _middle_high            = NULL;
 717   _upper_high             = NULL;
 718   _lower_high_boundary    = NULL;
 719   _middle_high_boundary   = NULL;
 720   _upper_high_boundary    = NULL;
 721   _lower_alignment        = 0;
 722   _middle_alignment       = 0;
 723   _upper_alignment        = 0;
 724   _special                = false;
 725   _executable             = false;
 726 }
 727 
 728 
 729 size_t VirtualSpace::committed_size() const {
 730   return pointer_delta(high(), low(), sizeof(char));
 731 }
 732 
 733 
 734 size_t VirtualSpace::reserved_size() const {
 735   return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
 736 }
 737 
 738 
 739 size_t VirtualSpace::uncommitted_size()  const {
 740   return reserved_size() - committed_size();
 741 }
 742 
 743 size_t VirtualSpace::actual_committed_size() const {
 744   // Special VirtualSpaces commit all reserved space up front.
 745   if (special()) {
 746     return reserved_size();
 747   }
 748 
 749   size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
 750   size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
 751   size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));
 752 
 753 #ifdef ASSERT
 754   size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
 755   size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
 756   size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));
 757 
 758   if (committed_high > 0) {
 759     assert(committed_low == lower, "Must be");
 760     assert(committed_middle == middle, "Must be");
 761   }
 762 
 763   if (committed_middle > 0) {
 764     assert(committed_low == lower, "Must be");
 765   }
 766   if (committed_middle < middle) {
 767     assert(committed_high == 0, "Must be");
 768   }
 769 
 770   if (committed_low < lower) {
 771     assert(committed_high == 0, "Must be");
 772     assert(committed_middle == 0, "Must be");
 773   }
 774 #endif
 775 
 776   return committed_low + committed_middle + committed_high;
 777 }
 778 
 779 
 780 bool VirtualSpace::contains(const void* p) const {
 781   return low() <= (const char*) p && (const char*) p < high();
 782 }
 783 
 784 static void pretouch_expanded_memory(void* start, void* end) {
 785   assert(is_ptr_aligned(start, os::vm_page_size()), "Unexpected alignment");
 786   assert(is_ptr_aligned(end,   os::vm_page_size()), "Unexpected alignment");
 787 
 788   os::pretouch_memory(start, end);
 789 }
 790 
 791 static bool commit_expanded(char* start, size_t size, size_t alignment, bool pre_touch, bool executable) {
 792   if (os::commit_memory(start, size, alignment, executable)) {
 793     if (pre_touch || AlwaysPreTouch) {
 794       pretouch_expanded_memory(start, start + size);
 795     }
 796     return true;
 797   }
 798 
 799   debug_only(warning(
 800       "INFO: os::commit_memory(" PTR_FORMAT ", " PTR_FORMAT
 801       " size=" SIZE_FORMAT ", executable=%d) failed",
 802       p2i(start), p2i(start + size), size, executable);)
 803 
 804   return false;
 805 }
 806 
 807 /*
 808    First we need to determine if a particular virtual space is using large
 809    pages.  This is done in the initialize function and only virtual spaces
 810    that are larger than LargePageSizeInBytes use large pages.  Once we
 811    have determined this, all expand_by and shrink_by calls must grow and
 812    shrink by large page size chunks.  If a particular request
 813    is within the current large page, the call to commit and uncommit memory
 814    can be ignored.  In the case that the low and high boundaries of this
 815    space are not large page aligned, the pages leading up to the first large
 816    page address and the pages after the last large page address must be
 817    allocated with default pages.
 818 */
 819 bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
 820   if (uncommitted_size() < bytes) {
 821     return false;
 822   }
 823 
 824   if (special()) {
 825     // don't commit memory if the entire space is pinned in memory
 826     _high += bytes;
 827     return true;
 828   }
 829 
 830   char* previous_high = high();
 831   char* unaligned_new_high = high() + bytes;
 832   assert(unaligned_new_high <= high_boundary(), "cannot expand by more than upper boundary");
 833 
 834   // Calculate where the new high for each of the regions should be.  If
 835   // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
 836   // then the unaligned lower and upper new highs would be the
 837   // lower_high() and upper_high() respectively.
 838   char* unaligned_lower_new_high =  MIN2(unaligned_new_high, lower_high_boundary());
 839   char* unaligned_middle_new_high = MIN2(unaligned_new_high, middle_high_boundary());
 840   char* unaligned_upper_new_high =  MIN2(unaligned_new_high, upper_high_boundary());
 841 
 842   // Align the new highs based on the regions' alignment.  Lower and upper
 843   // alignment will always be the default page size.  Middle alignment will be
 844   // LargePageSizeInBytes if the actual size of the virtual space is in
 845   // fact larger than LargePageSizeInBytes.
 846   char* aligned_lower_new_high =  (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
 847   char* aligned_middle_new_high = (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
 848   char* aligned_upper_new_high =  (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
 849 
 850   // Determine which regions need to grow in this expand_by call.
 851   // If you are growing in the lower region, high() must be in that
 852   // region so calculate the size based on high().  For the middle and
 853   // upper regions, determine the starting point of growth based on the
 854   // location of high().  By getting the MAX of the region's low address
 855   // (or the previous region's high address) and high(), we can tell if it
 856   // is an intra or inter region growth.
 857   size_t lower_needs = 0;
 858   if (aligned_lower_new_high > lower_high()) {
 859     lower_needs = pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
 860   }
 861   size_t middle_needs = 0;
 862   if (aligned_middle_new_high > middle_high()) {
 863     middle_needs = pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
 864   }
 865   size_t upper_needs = 0;
 866   if (aligned_upper_new_high > upper_high()) {
 867     upper_needs = pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
 868   }
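
  // Example: growing high() from inside the lower region into the middle region
  // commits the rest of the lower region with small pages and the needed part of
  // the middle region with the middle (possibly large page) granularity.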
 869 
 870   // Check contiguity.
 871   assert(low_boundary() <= lower_high() && lower_high() <= lower_high_boundary(),
 872          "high address must be contained within the region");
 873   assert(lower_high_boundary() <= middle_high() && middle_high() <= middle_high_boundary(),
 874          "high address must be contained within the region");
 875   assert(middle_high_boundary() <= upper_high() && upper_high() <= upper_high_boundary(),
 876          "high address must be contained within the region");
 877 
 878   // Commit regions
 879   if (lower_needs > 0) {
 880     assert(lower_high() + lower_needs <= lower_high_boundary(), "must not expand beyond region");
 881     if (!commit_expanded(lower_high(), lower_needs, _lower_alignment, pre_touch, _executable)) {
 882       return false;
 883     }
 884     _lower_high += lower_needs;
 885   }
 886 
 887   if (middle_needs > 0) {
 888     assert(middle_high() + middle_needs <= middle_high_boundary(), "must not expand beyond region");
 889     if (!commit_expanded(middle_high(), middle_needs, _middle_alignment, pre_touch, _executable)) {
 890       return false;
 891     }
 892     _middle_high += middle_needs;
 893   }
 894 
 895   if (upper_needs > 0) {
 896     assert(upper_high() + upper_needs <= upper_high_boundary(), "must not expand beyond region");
 897     if (!commit_expanded(upper_high(), upper_needs, _upper_alignment, pre_touch, _executable)) {
 898       return false;
 899     }
 900     _upper_high += upper_needs;
 901   }
 902 
 903   _high += bytes;
 904   return true;
 905 }
 906 
 907 // A page is uncommitted if the contents of the entire page are deemed unusable.
 908 // Continue to decrement the high() pointer until it reaches a page boundary, in
 909 // which case that particular page can now be uncommitted.
 910 void VirtualSpace::shrink_by(size_t size) {
 911   if (committed_size() < size)
 912     fatal("Cannot shrink virtual space to negative size");
 913 
 914   if (special()) {
 915     // don't uncommit if the entire space is pinned in memory
 916     _high -= size;
 917     return;
 918   }
 919 
 920   char* unaligned_new_high = high() - size;
 921   assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");
 922 
 923   // Calculate new unaligned address
 924   char* unaligned_upper_new_high =
 925     MAX2(unaligned_new_high, middle_high_boundary());
 926   char* unaligned_middle_new_high =
 927     MAX2(unaligned_new_high, lower_high_boundary());
 928   char* unaligned_lower_new_high =
 929     MAX2(unaligned_new_high, low_boundary());
 930 
 931   // Align address to region's alignment
 932   char* aligned_upper_new_high =
 933     (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
 934   char* aligned_middle_new_high =
 935     (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
 936   char* aligned_lower_new_high =
 937     (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
 938 
 939   // Determine which regions need to shrink
 940   size_t upper_needs = 0;
 941   if (aligned_upper_new_high < upper_high()) {
 942     upper_needs =
 943       pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
 944   }
 945   size_t middle_needs = 0;
 946   if (aligned_middle_new_high < middle_high()) {
 947     middle_needs =
 948       pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
 949   }
 950   size_t lower_needs = 0;
 951   if (aligned_lower_new_high < lower_high()) {
 952     lower_needs =
 953       pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
 954   }
 955 
 956   // Check contiguity.
 957   assert(middle_high_boundary() <= upper_high() &&
 958          upper_high() <= upper_high_boundary(),
 959          "high address must be contained within the region");
 960   assert(lower_high_boundary() <= middle_high() &&
 961          middle_high() <= middle_high_boundary(),
 962          "high address must be contained within the region");
 963   assert(low_boundary() <= lower_high() &&
 964          lower_high() <= lower_high_boundary(),
 965          "high address must be contained within the region");
 966 
 967   // Uncommit
 968   if (upper_needs > 0) {
 969     assert(middle_high_boundary() <= aligned_upper_new_high &&
 970            aligned_upper_new_high + upper_needs <= upper_high_boundary(),
 971            "must not shrink beyond region");
 972     if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
 973       debug_only(warning("os::uncommit_memory failed"));
 974       return;
 975     } else {
 976       _upper_high -= upper_needs;
 977     }
 978   }
 979   if (middle_needs > 0) {
 980     assert(lower_high_boundary() <= aligned_middle_new_high &&
 981            aligned_middle_new_high + middle_needs <= middle_high_boundary(),
 982            "must not shrink beyond region");
 983     if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
 984       debug_only(warning("os::uncommit_memory failed"));
 985       return;
 986     } else {
 987       _middle_high -= middle_needs;
 988     }
 989   }
 990   if (lower_needs > 0) {
 991     assert(low_boundary() <= aligned_lower_new_high &&
 992            aligned_lower_new_high + lower_needs <= lower_high_boundary(),
 993            "must not shrink beyond region");
 994     if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
 995       debug_only(warning("os::uncommit_memory failed"));
 996       return;
 997     } else {
 998       _lower_high -= lower_needs;
 999     }
1000   }
1001 
1002   _high -= size;
1003 }
1004 
1005 #ifndef PRODUCT
1006 void VirtualSpace::check_for_contiguity() {
1007   // Check contiguity.
1008   assert(low_boundary() <= lower_high() &&
1009          lower_high() <= lower_high_boundary(),
1010          "high address must be contained within the region");
1011   assert(lower_high_boundary() <= middle_high() &&
1012          middle_high() <= middle_high_boundary(),
1013          "high address must be contained within the region");
1014   assert(middle_high_boundary() <= upper_high() &&
1015          upper_high() <= upper_high_boundary(),
1016          "high address must be contained within the region");
1017   assert(low() >= low_boundary(), "low");
1018   assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
1019   assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
1020   assert(high() <= upper_high(), "upper high");
1021 }
1022 
1023 void VirtualSpace::print_on(outputStream* out) {
1024   out->print   ("Virtual space:");
1025   if (special()) out->print(" (pinned in memory)");
1026   out->cr();
1027   out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
1028   out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
1029   out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low()), p2i(high()));
1030   out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low_boundary()), p2i(high_boundary()));
1031 }
1032 
1033 void VirtualSpace::print() {
1034   print_on(tty);
1035 }
1036 
1037 /////////////// Unit tests ///////////////
1038 
1039 #ifndef PRODUCT
1040 
1041 #define test_log(...) \
1042   do {\
1043     if (VerboseInternalVMTests) { \
1044       tty->print_cr(__VA_ARGS__); \
1045       tty->flush(); \
1046     }\
1047   } while (false)
1048 
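// The unit tests below are compiled out of PRODUCT builds. They are driven by
// the internal VM test runner (e.g. a debug build run with
// -XX:+ExecuteInternalVMTests), which calls the TestReservedSpace_test() and
// TestVirtualSpace_test() entry points defined near the end of this file.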
1049 class TestReservedSpace : AllStatic {
1050  public:
1051   static void small_page_write(void* addr, size_t size) {
1052     size_t page_size = os::vm_page_size();
1053 
1054     char* end = (char*)addr + size;
1055     for (char* p = (char*)addr; p < end; p += page_size) {
1056       *p = 1;
1057     }
1058   }
1059 
1060   static void release_memory_for_test(ReservedSpace rs) {
1061     if (rs.special()) {
1062       guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
1063     } else {
1064       guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
1065     }
1066   }
1067 
1068   static void test_reserved_space1(size_t size, size_t alignment) {
1069     test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
1070 
1071     assert(is_size_aligned(size, alignment), "Incorrect input parameters");
1072 
1073     ReservedSpace rs(size,          // size
1074                      alignment,     // alignment
1075                      UseLargePages, // large
1076                      (char *)NULL); // requested_address
1077 
1078     test_log(" rs.special() == %d", rs.special());
1079 
1080     assert(rs.base() != NULL, "Must be");
1081     assert(rs.size() == size, "Must be");
1082 
1083     assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
1084     assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
1085 
1086     if (rs.special()) {
1087       small_page_write(rs.base(), size);
1088     }
1089 
1090     release_memory_for_test(rs);
1091   }
1092 
1093   static void test_reserved_space2(size_t size) {
1094     test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
1095 
1096     assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1097 
1098     ReservedSpace rs(size);
1099 
1100     test_log(" rs.special() == %d", rs.special());
1101 
1102     assert(rs.base() != NULL, "Must be");
1103     assert(rs.size() == size, "Must be");
1104 
1105     if (rs.special()) {
1106       small_page_write(rs.base(), size);
1107     }
1108 
1109     release_memory_for_test(rs);
1110   }
1111 
1112   static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
1113     test_log("test_reserved_space3(%p, %p, %d)",
1114         (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);
1115 
1116     if (size < alignment) {
1117       // Tests might set -XX:LargePageSizeInBytes=<small pages> and cause unexpected input arguments for this test.
1118       assert((size_t)os::vm_page_size() == os::large_page_size(), "Test needs further refinement");
1119       return;
1120     }
1121 
1122     assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1123     assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");
1124 
1125     bool large = maybe_large && UseLargePages && size >= os::large_page_size();
1126 
1127     ReservedSpace rs(size, alignment, large, false);
1128 
1129     test_log(" rs.special() == %d", rs.special());
1130 
1131     assert(rs.base() != NULL, "Must be");
1132     assert(rs.size() == size, "Must be");
1133 
1134     if (rs.special()) {
1135       small_page_write(rs.base(), size);
1136     }
1137 
1138     release_memory_for_test(rs);
1139   }
1140 
1141 
1142   static void test_reserved_space1() {
1143     size_t size = 2 * 1024 * 1024;
1144     size_t ag   = os::vm_allocation_granularity();
1145 
1146     test_reserved_space1(size,      ag);
1147     test_reserved_space1(size * 2,  ag);
1148     test_reserved_space1(size * 10, ag);
1149   }
1150 
1151   static void test_reserved_space2() {
1152     size_t size = 2 * 1024 * 1024;
1153     size_t ag = os::vm_allocation_granularity();
1154 
1155     test_reserved_space2(size * 1);
1156     test_reserved_space2(size * 2);
1157     test_reserved_space2(size * 10);
1158     test_reserved_space2(ag);
1159     test_reserved_space2(size - ag);
1160     test_reserved_space2(size);
1161     test_reserved_space2(size + ag);
1162     test_reserved_space2(size * 2);
1163     test_reserved_space2(size * 2 - ag);
1164     test_reserved_space2(size * 2 + ag);
1165     test_reserved_space2(size * 3);
1166     test_reserved_space2(size * 3 - ag);
1167     test_reserved_space2(size * 3 + ag);
1168     test_reserved_space2(size * 10);
1169     test_reserved_space2(size * 10 + size / 2);
1170   }
1171 
1172   static void test_reserved_space3() {
1173     size_t ag = os::vm_allocation_granularity();
1174 
1175     test_reserved_space3(ag,      ag    , false);
1176     test_reserved_space3(ag * 2,  ag    , false);
1177     test_reserved_space3(ag * 3,  ag    , false);
1178     test_reserved_space3(ag * 2,  ag * 2, false);
1179     test_reserved_space3(ag * 4,  ag * 2, false);
1180     test_reserved_space3(ag * 8,  ag * 2, false);
1181     test_reserved_space3(ag * 4,  ag * 4, false);
1182     test_reserved_space3(ag * 8,  ag * 4, false);
1183     test_reserved_space3(ag * 16, ag * 4, false);
1184 
1185     if (UseLargePages) {
1186       size_t lp = os::large_page_size();
1187 
1188       // Without large pages
1189       test_reserved_space3(lp,     ag * 4, false);
1190       test_reserved_space3(lp * 2, ag * 4, false);
1191       test_reserved_space3(lp * 4, ag * 4, false);
1192       test_reserved_space3(lp,     lp    , false);
1193       test_reserved_space3(lp * 2, lp    , false);
1194       test_reserved_space3(lp * 3, lp    , false);
1195       test_reserved_space3(lp * 2, lp * 2, false);
1196       test_reserved_space3(lp * 4, lp * 2, false);
1197       test_reserved_space3(lp * 8, lp * 2, false);
1198 
1199       // With large pages
1200       test_reserved_space3(lp, ag * 4    , true);
1201       test_reserved_space3(lp * 2, ag * 4, true);
1202       test_reserved_space3(lp * 4, ag * 4, true);
1203       test_reserved_space3(lp, lp        , true);
1204       test_reserved_space3(lp * 2, lp    , true);
1205       test_reserved_space3(lp * 3, lp    , true);
1206       test_reserved_space3(lp * 2, lp * 2, true);
1207       test_reserved_space3(lp * 4, lp * 2, true);
1208       test_reserved_space3(lp * 8, lp * 2, true);
1209     }
1210   }
1211 
1212   static void test_reserved_space() {
1213     test_reserved_space1();
1214     test_reserved_space2();
1215     test_reserved_space3();
1216   }
1217 };
1218 
1219 void TestReservedSpace_test() {
1220   TestReservedSpace::test_reserved_space();
1221 }
1222 
1223 #define assert_equals(actual, expected)  \
1224   assert(actual == expected,             \
1225          "Got " SIZE_FORMAT " expected " \
1226          SIZE_FORMAT, actual, expected);
1227 
1228 #define assert_ge(value1, value2)                  \
1229   assert(value1 >= value2,                         \
1230          "'" #value1 "': " SIZE_FORMAT " '"        \
1231          #value2 "': " SIZE_FORMAT, value1, value2);
1232 
1233 #define assert_lt(value1, value2)                  \
1234   assert(value1 < value2,                          \
1235          "'" #value1 "': " SIZE_FORMAT " '"        \
1236          #value2 "': " SIZE_FORMAT, value1, value2);
1237 
1238 
1239 class TestVirtualSpace : AllStatic {
1240   enum TestLargePages {
1241     Default,
1242     Disable,
1243     Reserve,
1244     Commit
1245   };
1246 
1247   static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
1248     switch(mode) {
1249     default:
1250     case Default:
1251     case Reserve:
1252       return ReservedSpace(reserve_size_aligned);
1253     case Disable:
1254     case Commit:
1255       return ReservedSpace(reserve_size_aligned,
1256                            os::vm_allocation_granularity(),
1257                            /* large */ false, /* exec */ false);
1258     }
1259   }
1260 
1261   static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
1262     switch(mode) {
1263     default:
1264     case Default:
1265     case Reserve:
1266       return vs.initialize(rs, 0);
1267     case Disable:
1268       return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
1269     case Commit:
1270       return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
1271     }
1272   }
1273 
1274  public:
1275   static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
1276                                                         TestLargePages mode = Default) {
1277     size_t granularity = os::vm_allocation_granularity();
1278     size_t reserve_size_aligned = align_size_up(reserve_size, granularity);
1279 
1280     ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
1281 
1282     assert(reserved.is_reserved(), "Must be");
1283 
1284     VirtualSpace vs;
1285     bool initialized = initialize_virtual_space(vs, reserved, mode);
1286     assert(initialized, "Failed to initialize VirtualSpace");
1287 
1288     vs.expand_by(commit_size, false);
1289 
1290     if (vs.special()) {
1291       assert_equals(vs.actual_committed_size(), reserve_size_aligned);
1292     } else {
1293       assert_ge(vs.actual_committed_size(), commit_size);
1294       // Approximate the commit granularity.
1295       // Make sure that we don't commit using large pages
1296       // if large pages have been disabled for this VirtualSpace.
1297       size_t commit_granularity = (mode == Disable || !UseLargePages) ?
1298                                    os::vm_page_size() : os::large_page_size();
1299       assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
1300     }
1301 
1302     reserved.release();
1303   }
1304 
1305   static void test_virtual_space_actual_committed_space_one_large_page() {
1306     if (!UseLargePages) {
1307       return;
1308     }
1309 
1310     size_t large_page_size = os::large_page_size();
1311 
1312     ReservedSpace reserved(large_page_size, large_page_size, true, false);
1313 
1314     assert(reserved.is_reserved(), "Must be");
1315 
1316     VirtualSpace vs;
1317     bool initialized = vs.initialize(reserved, 0);
1318     assert(initialized, "Failed to initialize VirtualSpace");
1319 
1320     vs.expand_by(large_page_size, false);
1321 
1322     assert_equals(vs.actual_committed_size(), large_page_size);
1323 
1324     reserved.release();
1325   }
1326 
1327   static void test_virtual_space_actual_committed_space() {
1328     test_virtual_space_actual_committed_space(4 * K, 0);
1329     test_virtual_space_actual_committed_space(4 * K, 4 * K);
1330     test_virtual_space_actual_committed_space(8 * K, 0);
1331     test_virtual_space_actual_committed_space(8 * K, 4 * K);
1332     test_virtual_space_actual_committed_space(8 * K, 8 * K);
1333     test_virtual_space_actual_committed_space(12 * K, 0);
1334     test_virtual_space_actual_committed_space(12 * K, 4 * K);
1335     test_virtual_space_actual_committed_space(12 * K, 8 * K);
1336     test_virtual_space_actual_committed_space(12 * K, 12 * K);
1337     test_virtual_space_actual_committed_space(64 * K, 0);
1338     test_virtual_space_actual_committed_space(64 * K, 32 * K);
1339     test_virtual_space_actual_committed_space(64 * K, 64 * K);
1340     test_virtual_space_actual_committed_space(2 * M, 0);
1341     test_virtual_space_actual_committed_space(2 * M, 4 * K);
1342     test_virtual_space_actual_committed_space(2 * M, 64 * K);
1343     test_virtual_space_actual_committed_space(2 * M, 1 * M);
1344     test_virtual_space_actual_committed_space(2 * M, 2 * M);
1345     test_virtual_space_actual_committed_space(10 * M, 0);
1346     test_virtual_space_actual_committed_space(10 * M, 4 * K);
1347     test_virtual_space_actual_committed_space(10 * M, 8 * K);
1348     test_virtual_space_actual_committed_space(10 * M, 1 * M);
1349     test_virtual_space_actual_committed_space(10 * M, 2 * M);
1350     test_virtual_space_actual_committed_space(10 * M, 5 * M);
1351     test_virtual_space_actual_committed_space(10 * M, 10 * M);
1352   }
1353 
1354   static void test_virtual_space_disable_large_pages() {
1355     if (!UseLargePages) {
1356       return;
1357     }
1358     // These test cases verify the commit behavior when we force VirtualSpace to disable large pages.
1359     test_virtual_space_actual_committed_space(10 * M, 0, Disable);
1360     test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
1361     test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
1362     test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
1363     test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
1364     test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
1365     test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);
1366 
1367     test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
1368     test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
1369     test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
1370     test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
1371     test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
1372     test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
1373     test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);
1374 
1375     test_virtual_space_actual_committed_space(10 * M, 0, Commit);
1376     test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
1377     test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
1378     test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
1379     test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
1380     test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
1381     test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
1382   }
1383 
1384   static void test_virtual_space() {
1385     test_virtual_space_actual_committed_space();
1386     test_virtual_space_actual_committed_space_one_large_page();
1387     test_virtual_space_disable_large_pages();
1388   }
1389 };
1390 
1391 void TestVirtualSpace_test() {
1392   TestVirtualSpace::test_virtual_space();
1393 }
1394 
1395 #endif // PRODUCT
1396 
1397 #endif