1 /*
   2  * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "code/codeCacheExtensions.hpp"
  27 #include "logging/log.hpp"
  28 #include "memory/resourceArea.hpp"
  29 #include "memory/virtualspace.hpp"
  30 #include "oops/markOop.hpp"
  31 #include "oops/oop.inline.hpp"
  32 #include "services/memTracker.hpp"
  33 
  34 // ReservedSpace
  35 
  36 // Dummy constructor
// Dummy constructor
// Creates an empty, unreserved space: all fields zeroed/NULL, no special
// (pinned large-page) reservation, not executable, and no backing file
// descriptor (-1 means "no file-backed heap").
ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
    _alignment(0), _special(false), _executable(false), _backing_fd(-1) {
}
  40 
  41 ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _backing_fd(-1) {
  42   bool has_preferred_page_size = preferred_page_size != 0;
  43   // Want to use large pages where possible and pad with small pages.
  44   size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
  45   bool large_pages = page_size != (size_t)os::vm_page_size();
  46   size_t alignment;
  47   if (large_pages && has_preferred_page_size) {
  48     alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
  49     // ReservedSpace initialization requires size to be aligned to the given
  50     // alignment. Align the size up.
  51     size = align_size_up(size, alignment);
  52   } else {
  53     // Don't force the alignment to be large page aligned,
  54     // since that will waste memory.
  55     alignment = os::vm_allocation_granularity();
  56   }
  57   initialize(size, alignment, large_pages, NULL, false);
  58 }
  59 
// Reserve 'size' bytes with the given alignment, optionally with large
// pages, and optionally at 'requested_address' (NULL means anywhere).
// The reservation is not executable and not file-backed.
ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             char* requested_address) : _backing_fd(-1) {
  initialize(size, alignment, large, requested_address, false);
}
  65 
// Reserve 'size' bytes with the given alignment, optionally with large
// pages, anywhere in the address space, with the requested executability.
// The reservation is not file-backed.
ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             bool executable) : _backing_fd(-1) {
  initialize(size, alignment, large, NULL, executable);
}
  71 
  72 // Helper method.
  73 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
  74                                            const size_t size, bool special)
  75 {
  76   if (base == requested_address || requested_address == NULL)
  77     return false; // did not fail
  78 
  79   if (base != NULL) {
  80     // Different reserve address may be acceptable in other cases
  81     // but for compressed oops heap should be at requested address.
  82     assert(UseCompressedOops, "currently requested address used only for compressed oops");
  83     log_debug(gc, heap, coops)("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address));
  84     // OS ignored requested address. Try different address.
  85     if (special) {
  86       if (!os::release_memory_special(base, size)) {
  87         fatal("os::release_memory_special failed");
  88       }
  89     } else {
  90       if (!os::release_memory(base, size)) {
  91         fatal("os::release_memory failed");
  92       }
  93     }
  94   }
  95   return true;
  96 }
  97 
// Core reservation routine. Reserves 'size' bytes aligned to 'alignment',
// optionally with (pinned) large pages, optionally at 'requested_address',
// with the requested executability. On failure all fields are left in the
// "unreserved" state (_base == NULL, _size == 0). If _backing_fd is set,
// the reservation is backed by that file and counts as committed.
void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  // Never align to less than a page.
  alignment = MAX2(alignment, (size_t)os::vm_page_size());

  // Reset fields first so an early return leaves a consistent
  // "nothing reserved" state.
  _base = NULL;
  _size = 0;
  _special = false;
  _executable = executable;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }

  // If OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  // If there is a backing file directory for this VirtualSpace then whether largepages are allocated is upto the filesystem the dir resides in.
  // So we ignore the UseLargePages flag in this case.
  bool special = (_backing_fd == -1) && (large && !os::can_commit_large_page_memory());
  char* base = NULL;

  if (special) {

    base = os::reserve_memory_special(size, alignment, requested_address, executable);

    if (base != NULL) {
      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
        // OS ignored requested address. Try different address.
        return;
      }
      // Check alignment constraints.
      assert((uintptr_t) base % alignment == 0,
             "Large pages returned a non-aligned address, base: "
             PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
             p2i(base), alignment);
      _special = true;
    } else {
      // failed; try to reserve regular memory below
      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
        log_debug(gc, heap, coops)("Reserve regular memory without large pages");
      }
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OSes returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid over mapping something
    // important.  If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address, _backing_fd);
      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // OS ignored requested address. Try different address.
        base = NULL;
      }
    } else {
      base = os::reserve_memory(size, NULL, alignment, _backing_fd);
    }

    if (base == NULL) return;

    // Check alignment constraints
    if ((((size_t)base) & (alignment - 1)) != 0) {
      // Base not aligned, retry
      // File-backed mappings must be unmapped, anonymous ones released.
      if (_backing_fd != -1) {
        if (!os::unmap_memory(base, size)) fatal("os::release_memory failed");
      }
      else {
        if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      }
      // Make sure that size is aligned
      size = align_size_up(size, alignment);
      base = os::reserve_memory_aligned(size, alignment, _backing_fd);

      if (requested_address != 0 &&
          failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address. Return back to the caller who can
        // take remedial action (like try again without a requested address).
        assert(_base == NULL, "should be");
        return;
      }
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = alignment;
  // If heap is reserved with a backing file, the entire space has been committed. So set the _special flag to true
  if (_backing_fd != -1) {
    _special = true;
  }
}
 205 
 206 
 207 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
 208                              bool special, bool executable) {
 209   assert((size % os::vm_allocation_granularity()) == 0,
 210          "size not allocation aligned");
 211   _base = base;
 212   _size = size;
 213   _alignment = alignment;
 214   _noaccess_prefix = 0;
 215   _special = special;
 216   _executable = executable;
 217 }
 218 
 219 
 220 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
 221                                         bool split, bool realloc) {
 222   assert(partition_size <= size(), "partition failed");
 223   if (split) {
 224     os::split_reserved_memory(base(), size(), partition_size, realloc);
 225   }
 226   ReservedSpace result(base(), partition_size, alignment, special(),
 227                        executable());
 228   return result;
 229 }
 230 
 231 
 232 ReservedSpace
 233 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
 234   assert(partition_size <= size(), "partition failed");
 235   ReservedSpace result(base() + partition_size, size() - partition_size,
 236                        alignment, special(), executable());
 237   return result;
 238 }
 239 
 240 
// Rounds 'size' up to a multiple of the OS page size.
size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_size_up(size, os::vm_page_size());
}
 244 
 245 
// Rounds 'size' down to a multiple of the OS page size.
size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_size_down(size, os::vm_page_size());
}
 249 
 250 
// Rounds 'size' up to a multiple of the OS allocation granularity
// (e.g. 64K on Windows, page size elsewhere).
size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_size_up(size, os::vm_allocation_granularity());
}
 254 
 255 
// Rounds 'size' down to a multiple of the OS allocation granularity.
size_t ReservedSpace::allocation_align_size_down(size_t size) {
  return align_size_down(size, os::vm_allocation_granularity());
}
 259 
 260 
 261 void ReservedSpace::release() {
 262   if (is_reserved()) {
 263     char *real_base = _base - _noaccess_prefix;
 264     const size_t real_size = _size + _noaccess_prefix;
 265     if (special()) {
 266       os::release_memory_special(real_base, real_size);
 267     } else{
 268       os::release_memory(real_base, real_size);
 269     }
 270     _base = NULL;
 271     _size = 0;
 272     _noaccess_prefix = 0;
 273     _alignment = 0;
 274     _special = false;
 275     _executable = false;
 276   }
 277 }
 278 
// Size of the protected prefix placed below a compressed-oops heap:
// the least common multiple of the page size and the heap alignment,
// so the prefix is both protectable and alignment-preserving.
static size_t noaccess_prefix_size(size_t alignment) {
  return lcm(os::vm_page_size(), alignment);
}
 282 
// Carves a protected "noaccess" page range off the bottom of the reserved
// heap so that decoding a compressed null (heap base) faults, enabling
// implicit null checks. Adjusts _base/_size to exclude the prefix.
void ReservedHeapSpace::establish_noaccess_prefix() {
  assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
  _noaccess_prefix = noaccess_prefix_size(_alignment);

  // Protection is only useful if the heap extends beyond the zerobased
  // encoding range, i.e. a non-zero heap base will actually be used.
  if (base() && base() + _size > (char *)OopEncodingHeapMax) {
    if (true
        WIN64_ONLY(&& !UseLargePages)     // cannot protect within a large-page mapping on Win64
        AIX_ONLY(&& os::vm_page_size() != SIZE_64K)) {
      // Protect memory at the base of the allocated region.
      // If special, the page was committed (only matters on windows)
      if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
        fatal("cannot protect protection page");
      }
      log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
                                 PTR_FORMAT " / " INTX_FORMAT " bytes",
                                 p2i(_base),
                                 _noaccess_prefix);
      assert(Universe::narrow_oop_use_implicit_null_checks() == true, "not initialized?");
    } else {
      // Protection not possible on this platform/configuration; fall back
      // to explicit null checks.
      Universe::set_narrow_oop_use_implicit_null_checks(false);
    }
  }

  // Hide the prefix from users of this space.
  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
}
 310 
 311 // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
 312 // Does not check whether the reserved memory actually is at requested_address, as the memory returned
 313 // might still fulfill the wishes of the caller.
 314 // Assures the memory is aligned to 'alignment'.
 315 // NOTE: If ReservedHeapSpace already points to some reserved memory this is freed, first.
// Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
// Does not check whether the reserved memory actually is at requested_address, as the memory returned
// might still fulfill the wishes of the caller.
// Assures the memory is aligned to 'alignment'.
// NOTE: If ReservedHeapSpace already points to some reserved memory this is freed, first.
void ReservedHeapSpace::try_reserve_heap(size_t size,
                                         size_t alignment,
                                         bool large,
                                         char* requested_address) {
  if (_base != NULL) {
    // We tried before, but we didn't like the address delivered.
    release();
  }

  // If OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  // If there is a backing file directory for this VirtualSpace then whether largepages are allocated is upto the filesystem the dir resides in.
  // So we ignore the UseLargePages flag in this case.
  bool special = (_backing_fd == -1) && (large && !os::can_commit_large_page_memory());
  char* base = NULL;

  log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
                             " heap of size " SIZE_FORMAT_HEX,
                             p2i(requested_address),
                             size);

  if (special) {
    // Pinned large pages; heap is never executable.
    base = os::reserve_memory_special(size, alignment, requested_address, false);

    if (base != NULL) {
      // Check alignment constraints.
      assert((uintptr_t) base % alignment == 0,
             "Large pages returned a non-aligned address, base: "
             PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
             p2i(base), alignment);
      _special = true;
    }
  }

  if (base == NULL) {
    // Failed; try to reserve regular memory below
    if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                          !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
      log_debug(gc, heap, coops)("Reserve regular memory without large pages");
    }

    // Optimistically assume that the OSes returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid over mapping something
    // important.  If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address, _backing_fd);
    } else {
      base = os::reserve_memory(size, NULL, alignment, _backing_fd);
    }
  }
  if (base == NULL) { return; }

  // Done
  _base = base;
  _size = size;
  _alignment = alignment;
  // If heap is reserved with a backing file, the entire space has been committed. So set the _special flag to true
  if (_backing_fd != -1) {
    _special = true;
  }

  // Check alignment constraints
  if ((((size_t)base) & (alignment - 1)) != 0) {
    // Base not aligned, retry.
    release();
  }
}
 388 
// Repeatedly calls try_reserve_heap() at candidate attach points between
// 'lowest_start' and 'highest_start' (top down, stepping by a multiple of
// 'attach_point_alignment'), until a reservation lands inside
// [aligned_heap_base_min_address, upper_bound) or the candidates run out.
// At most HeapSearchSteps attempts are made.
void ReservedHeapSpace::try_reserve_range(char *highest_start,
                                          char *lowest_start,
                                          size_t attach_point_alignment,
                                          char *aligned_heap_base_min_address,
                                          char *upper_bound,
                                          size_t size,
                                          size_t alignment,
                                          bool large) {
  const size_t attach_range = highest_start - lowest_start;
  // Cap num_attempts at possible number.
  // At least one is possible even for 0 sized attach range.
  const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
  const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);

  // For a zero-sized range, make the step so large that the second
  // iteration's attach point underflows the loop bounds (only one try).
  const size_t stepsize = (attach_range == 0) ? // Only one try.
    (size_t) highest_start : align_size_up(attach_range / num_attempts_to_try, attach_point_alignment);

  // Try attach points from top to bottom.
  char* attach_point = highest_start;
  while (attach_point >= lowest_start  &&
         attach_point <= highest_start &&  // Avoid wrap around.
         ((_base == NULL) ||              // Keep going while nothing acceptable:
          (_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
    try_reserve_heap(size, alignment, large, attach_point);
    attach_point -= stepsize;
  }
}
 416 
 417 #define SIZE_64K  ((uint64_t) UCONST64(      0x10000))
 418 #define SIZE_256M ((uint64_t) UCONST64(   0x10000000))
 419 #define SIZE_32G  ((uint64_t) UCONST64(  0x800000000))
 420 
 421 // Helper for heap allocation. Returns an array with addresses
 422 // (OS-specific) which are suited for disjoint base mode. Array is
 423 // NULL terminated.
// Helper for heap allocation. Returns an array with addresses
// (OS-specific) which are suited for disjoint base mode. Array is
// NULL terminated.
// NOTE: the returned pointer aliases a function-local static array which
// may also be truncated in place to honor HeapSearchSteps; callers must
// not free it, and the function is not intended for concurrent use.
static char** get_attach_addresses_for_disjoint_mode() {
  static uint64_t addresses[] = {
     2 * SIZE_32G,
     3 * SIZE_32G,
     4 * SIZE_32G,
     8 * SIZE_32G,
    10 * SIZE_32G,
     1 * SIZE_64K * SIZE_32G,
     2 * SIZE_64K * SIZE_32G,
     3 * SIZE_64K * SIZE_32G,
     4 * SIZE_64K * SIZE_32G,
    16 * SIZE_64K * SIZE_32G,
    32 * SIZE_64K * SIZE_32G,
    34 * SIZE_64K * SIZE_32G,
    0
  };

  // Sort out addresses smaller than HeapBaseMinAddress. This assumes
  // the array is sorted.
  uint i = 0;
  while (addresses[i] != 0 &&
         (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
    i++;
  }
  uint start = i;

  // Avoid more steps than requested.
  i = 0;
  while (addresses[start+i] != 0) {
    if (i == HeapSearchSteps) {
      // Truncate the (static) array in place.
      addresses[start+i] = 0;
      break;
    }
    i++;
  }

  return (char**) &addresses[start];
}
 462 
// Reserves a heap placed to make compressed-oop decoding as cheap as
// possible. Placement schemes are tried in order of decreasing benefit:
// 1. user-requested HeapBaseMinAddress (exact), 2. unscaled (below 4G),
// 3. zerobased (below 32G), 4. disjoint-base attach points, 5. anywhere
// (with a noaccess prefix for implicit null checks).
void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, bool large) {
  guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
            "can not allocate compressed oop heap for this size");
  guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
  assert(HeapBaseMinAddress > 0, "sanity");

  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  // The necessary attach point alignment for generated wish addresses.
  // This is needed to increase the chance of attaching for mmap and shmat.
  const size_t os_attach_point_alignment =
    AIX_ONLY(SIZE_256M)  // Known shm boundary alignment.
    NOT_AIX(os::vm_allocation_granularity());
  const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);

  char *aligned_heap_base_min_address = (char *)align_ptr_up((void *)HeapBaseMinAddress, alignment);
  // A noaccess prefix is only needed if the heap cannot fit below the
  // zerobased encoding limit.
  size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
    noaccess_prefix_size(alignment) : 0;

  // Attempt to alloc at user-given address.
  if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
    try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_heap_base_min_address);
    if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
      release();
    }
  }

  // Keep heap at HeapBaseMinAddress.
  if (_base == NULL) {

    // Try to allocate the heap at addresses that allow efficient oop compression.
    // Different schemes are tried, in order of decreasing optimization potential.
    //
    // For this, try_reserve_heap() is called with the desired heap base addresses.
    // A call into the os layer to allocate at a given address can return memory
    // at a different address than requested.  Still, this might be memory at a useful
    // address. try_reserve_heap() always returns this allocated memory, as only here
    // the criteria for a good heap are checked.

    // Attempt to allocate so that we can run without base and scale (32-Bit unscaled compressed oops).
    // Give it several tries from top of range to bottom.
    if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {

      // Calc address range within we try to attach (range of possible start addresses).
      char* const highest_start = (char *)align_ptr_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
      char* const lowest_start  = (char *)align_ptr_up(aligned_heap_base_min_address, attach_point_alignment);
      try_reserve_range(highest_start, lowest_start, attach_point_alignment,
                        aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
    }

    // zerobased: Attempt to allocate in the lower 32G.
    // But leave room for the compressed class pointers, which is allocated above
    // the heap.
    char *zerobased_max = (char *)OopEncodingHeapMax;
    const size_t class_space = align_size_up(CompressedClassSpaceSize, alignment);
    // For small heaps, save some space for compressed class pointer
    // space so it can be decoded with no base.
    if (UseCompressedClassPointers && !UseSharedSpaces &&
        OopEncodingHeapMax <= KlassEncodingMetaspaceMax &&
        (uint64_t)(aligned_heap_base_min_address + size + class_space) <= KlassEncodingMetaspaceMax) {
      zerobased_max = (char *)OopEncodingHeapMax - class_space;
    }

    // Give it several tries from top of range to bottom.
    if (aligned_heap_base_min_address + size <= zerobased_max &&    // Zerobased theoretical possible.
        ((_base == NULL) ||                        // No previous try succeeded.
         (_base + size > zerobased_max))) {        // Unscaled delivered an arbitrary address.

      // Calc address range within we try to attach (range of possible start addresses).
      char *const highest_start = (char *)align_ptr_down(zerobased_max - size, attach_point_alignment);
      // Need to be careful about size being guaranteed to be less
      // than UnscaledOopHeapMax due to type constraints.
      char *lowest_start = aligned_heap_base_min_address;
      uint64_t unscaled_end = UnscaledOopHeapMax - size;
      if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
        lowest_start = MAX2(lowest_start, (char*)unscaled_end);
      }
      lowest_start  = (char *)align_ptr_up(lowest_start, attach_point_alignment);
      try_reserve_range(highest_start, lowest_start, attach_point_alignment,
                        aligned_heap_base_min_address, zerobased_max, size, alignment, large);
    }

    // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
    // implement null checks.
    noaccess_prefix = noaccess_prefix_size(alignment);

    // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjointbase mode.
    char** addresses = get_attach_addresses_for_disjoint_mode();
    int i = 0;
    while (addresses[i] &&                                 // End of array not yet reached.
           ((_base == NULL) ||                             // No previous try succeeded.
            (_base + size >  (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
             !Universe::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
      char* const attach_point = addresses[i];
      assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
      try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
      i++;
    }

    // Last, desperate try without any placement.
    if (_base == NULL) {
      log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix);
      initialize(size + noaccess_prefix, alignment, large, NULL, false);
    }
  }
}
 575 
// Reserves the Java heap. If 'backingFSforHeap' is non-NULL the heap is
// backed by a file created in that directory (e.g. for DAX/NVDIMM use);
// otherwise anonymous memory is used. With compressed oops enabled the
// placement heuristics of initialize_compressed_heap() apply.
ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* backingFSforHeap) : ReservedSpace() {

  if (size == 0) {
    return;
  }

  if (backingFSforHeap != NULL) {
    _backing_fd = os::create_file_for_heap(backingFSforHeap, size);
  }

  // Heap size should be aligned to alignment, too.
  guarantee(is_size_aligned(size, alignment), "set by caller");

  if (UseCompressedOops) {
    initialize_compressed_heap(size, alignment, large);
    if (_size > size) {
      // We allocated heap with noaccess prefix.
      // It can happen we get a zerobased/unscaled heap with noaccess prefix,
      // if we had to try at arbitrary address.
      establish_noaccess_prefix();
    }
  } else {
    initialize(size, alignment, large, NULL, false);
  }

  // The heap base must survive being encoded as a mark word
  // (mark-sweep stores a pointer-as-mark during compaction).
  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");

  if (base() > 0) {
    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
  }

  if (backingFSforHeap != NULL) {
    // The mapping keeps the file alive; the descriptor is no longer needed.
    os::close(_backing_fd);
  }
}
 614 
 615 // Reserve space for code segment.  Same as Java heap only we mark this as
 616 // executable.
// Reserve space for code segment.  Same as Java heap only we mark this as
// executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     bool large) :
  ReservedSpace(r_size, rs_align, large, /*executable*/ CodeCacheExtensions::support_dynamic_code()) {
  // Tag the region for native memory tracking.
  MemTracker::record_virtual_memory_type((address)base(), mtCode);
}
 623 
 624 // VirtualSpace
 625 
// Creates an uninitialized VirtualSpace; all boundaries, region highs and
// alignments are cleared. initialize() must be called before use.
VirtualSpace::VirtualSpace() {
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}
 643 
 644 
// Initializes this space over the reservation 'rs' and commits
// 'committed_size' bytes, using the largest commit granularity (page size)
// suitable for the region. Returns false if rs is not reserved or the
// initial commit fails.
bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
  return initialize_with_granularity(rs, committed_size, max_commit_granularity);
}
 649 
// Initializes this space over 'rs', splitting it into lower/middle/upper
// regions so that the middle region can be committed with
// 'max_commit_granularity' (e.g. large pages) while the unaligned ends use
// small pages. Commits 'committed_size' bytes. Returns false on failure.
bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
  if(!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  assert(max_commit_granularity > 0, "Granularity must be non-zero.");

  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  // Nothing committed yet.
  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically,  we see that with a 4MB
  // page size, the only spaces that get handled this way are codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = max_commit_granularity;
  _upper_alignment  = os::vm_page_size();

  // End of each region
  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // commit to initial size
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}
 696 
 697 
// Resets the space's bookkeeping. Note: does NOT unreserve the underlying
// memory (see release() below).
VirtualSpace::~VirtualSpace() {
  release();
}
 701 
 702 
// Clears all bookkeeping fields back to the uninitialized state.
void VirtualSpace::release() {
  // This does not release memory it never reserved.
  // Caller must release via rs.release();
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}
 722 
 723 
// Number of bytes currently committed ([low(), high())).
size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}
 727 
 728 
// Total number of reserved bytes ([low_boundary(), high_boundary())).
size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}
 732 
 733 
 734 size_t VirtualSpace::uncommitted_size()  const {
 735   return reserved_size() - committed_size();
 736 }
 737 
// Returns the number of bytes actually committed by the OS, summed over the
// lower, middle and upper regions.  This can exceed committed_size() because
// each region commits in multiples of its own alignment (the middle region
// may use large pages).
size_t VirtualSpace::actual_committed_size() const {
  // Special VirtualSpaces commit all reserved space up front.
  if (special()) {
    return reserved_size();
  }

  // Committed bytes in each region: from the region's low edge up to its
  // current high-water mark.
  size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
  size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
  size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));

#ifdef ASSERT
  // Sanity check: commitment must be contiguous from the bottom.  If a
  // higher region has any committed memory, all lower regions must be
  // fully committed; if a lower region is only partially committed, no
  // higher region may be committed at all.
  size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
  size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
  size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));

  if (committed_high > 0) {
    assert(committed_low == lower, "Must be");
    assert(committed_middle == middle, "Must be");
  }

  if (committed_middle > 0) {
    assert(committed_low == lower, "Must be");
  }
  if (committed_middle < middle) {
    assert(committed_high == 0, "Must be");
  }

  if (committed_low < lower) {
    assert(committed_high == 0, "Must be");
    assert(committed_middle == 0, "Must be");
  }
#endif

  return committed_low + committed_middle + committed_high;
}
 773 
 774 
 775 bool VirtualSpace::contains(const void* p) const {
 776   return low() <= (const char*) p && (const char*) p < high();
 777 }
 778 
// Touch the given page-aligned range [start, end) so the OS backs it with
// physical memory now rather than on first access.
static void pretouch_expanded_memory(void* start, void* end) {
  assert(is_ptr_aligned(start, os::vm_page_size()), "Unexpected alignment");
  assert(is_ptr_aligned(end,   os::vm_page_size()), "Unexpected alignment");

  os::pretouch_memory(start, end);
}
 785 
 786 static bool commit_expanded(char* start, size_t size, size_t alignment, bool pre_touch, bool executable) {
 787   if (os::commit_memory(start, size, alignment, executable)) {
 788     if (pre_touch || AlwaysPreTouch) {
 789       pretouch_expanded_memory(start, start + size);
 790     }
 791     return true;
 792   }
 793 
 794   debug_only(warning(
 795       "INFO: os::commit_memory(" PTR_FORMAT ", " PTR_FORMAT
 796       " size=" SIZE_FORMAT ", executable=%d) failed",
 797       p2i(start), p2i(start + size), size, executable);)
 798 
 799   return false;
 800 }
 801 
 802 /*
 803    First we need to determine if a particular virtual space is using large
 804    pages.  This is done at the initialize function and only virtual spaces
 805    that are larger than LargePageSizeInBytes use large pages.  Once we
 806    have determined this, all expand_by and shrink_by calls must grow and
 807    shrink by large page size chunks.  If a particular request
 808    is within the current large page, the call to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
 813 */
 814 bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
 815   if (uncommitted_size() < bytes) {
 816     return false;
 817   }
 818 
 819   if (special()) {
 820     // don't commit memory if the entire space is pinned in memory
 821     _high += bytes;
 822     return true;
 823   }
 824 
 825   char* previous_high = high();
 826   char* unaligned_new_high = high() + bytes;
 827   assert(unaligned_new_high <= high_boundary(), "cannot expand by more than upper boundary");
 828 
 829   // Calculate where the new high for each of the regions should be.  If
 830   // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
 831   // then the unaligned lower and upper new highs would be the
 832   // lower_high() and upper_high() respectively.
 833   char* unaligned_lower_new_high =  MIN2(unaligned_new_high, lower_high_boundary());
 834   char* unaligned_middle_new_high = MIN2(unaligned_new_high, middle_high_boundary());
 835   char* unaligned_upper_new_high =  MIN2(unaligned_new_high, upper_high_boundary());
 836 
 837   // Align the new highs based on the regions alignment.  lower and upper
 838   // alignment will always be default page size.  middle alignment will be
 839   // LargePageSizeInBytes if the actual size of the virtual space is in
 840   // fact larger than LargePageSizeInBytes.
 841   char* aligned_lower_new_high =  (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
 842   char* aligned_middle_new_high = (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
 843   char* aligned_upper_new_high =  (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
 844 
 845   // Determine which regions need to grow in this expand_by call.
 846   // If you are growing in the lower region, high() must be in that
 847   // region so calculate the size based on high().  For the middle and
 848   // upper regions, determine the starting point of growth based on the
 849   // location of high().  By getting the MAX of the region's low address
 850   // (or the previous region's high address) and high(), we can tell if it
 851   // is an intra or inter region growth.
 852   size_t lower_needs = 0;
 853   if (aligned_lower_new_high > lower_high()) {
 854     lower_needs = pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
 855   }
 856   size_t middle_needs = 0;
 857   if (aligned_middle_new_high > middle_high()) {
 858     middle_needs = pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
 859   }
 860   size_t upper_needs = 0;
 861   if (aligned_upper_new_high > upper_high()) {
 862     upper_needs = pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
 863   }
 864 
 865   // Check contiguity.
 866   assert(low_boundary() <= lower_high() && lower_high() <= lower_high_boundary(),
 867          "high address must be contained within the region");
 868   assert(lower_high_boundary() <= middle_high() && middle_high() <= middle_high_boundary(),
 869          "high address must be contained within the region");
 870   assert(middle_high_boundary() <= upper_high() && upper_high() <= upper_high_boundary(),
 871          "high address must be contained within the region");
 872 
 873   // Commit regions
 874   if (lower_needs > 0) {
 875     assert(lower_high() + lower_needs <= lower_high_boundary(), "must not expand beyond region");
 876     if (!commit_expanded(lower_high(), lower_needs, _lower_alignment, pre_touch, _executable)) {
 877       return false;
 878     }
 879     _lower_high += lower_needs;
 880   }
 881 
 882   if (middle_needs > 0) {
 883     assert(middle_high() + middle_needs <= middle_high_boundary(), "must not expand beyond region");
 884     if (!commit_expanded(middle_high(), middle_needs, _middle_alignment, pre_touch, _executable)) {
 885       return false;
 886     }
 887     _middle_high += middle_needs;
 888   }
 889 
 890   if (upper_needs > 0) {
 891     assert(upper_high() + upper_needs <= upper_high_boundary(), "must not expand beyond region");
 892     if (!commit_expanded(upper_high(), upper_needs, _upper_alignment, pre_touch, _executable)) {
 893       return false;
 894     }
 895     _upper_high += upper_needs;
 896   }
 897 
 898   _high += bytes;
 899   return true;
 900 }
 901 
 902 // A page is uncommitted if the contents of the entire page is deemed unusable.
 903 // Continue to decrement the high() pointer until it reaches a page boundary
 904 // in which case that particular page can now be uncommitted.
// Shrink the committed part of this space by 'size' bytes, uncommitting
// whole pages per region (top-down: upper, then middle, then lower) where
// the new high crosses a region-alignment boundary.  If an OS uncommit
// fails, the function returns early after a debug-build warning WITHOUT
// updating _high, so committed_size() is left reflecting the pre-shrink
// value (minus any regions already uncommitted by this call).
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // don't uncommit if the entire space is pinned in memory
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address
  // Clamp the new high into each region: a region whose low edge is above
  // the new high shrinks to empty (its own low edge).
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align address to region's alignment
  // Rounding UP means a partially-vacated page/large-page stays committed.
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}
 999 
1000 #ifndef PRODUCT
// Debug-only sanity check: each region's high-water mark must lie within
// that region, the region boundaries must nest inside the overall
// [low_boundary, high_boundary] range, and the committed markers low()/high()
// must be consistent with them.
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}
1017 
// Print the committed/reserved sizes and the [low, high] markers of this
// space to the given stream.
void VirtualSpace::print_on(outputStream* out) {
  out->print   ("Virtual space:");
  if (special()) out->print(" (pinned in memory)");
  out->cr();
  out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
  out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low()), p2i(high()));
  out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low_boundary()), p2i(high_boundary()));
}
1027 
// Convenience wrapper: print this space to the global tty stream.
void VirtualSpace::print() {
  print_on(tty);
}
1031 
1032 /////////////// Unit tests ///////////////
1033 
1034 #ifndef PRODUCT
1035 
// Prints (and flushes) a tty log line, but only when the VM runs with
// -XX:+VerboseInternalVMTests.
#define test_log(...) \
  do {\
    if (VerboseInternalVMTests) { \
      tty->print_cr(__VA_ARGS__); \
      tty->flush(); \
    }\
  } while (false)
1043 
// Internal VM tests for ReservedSpace, entered via TestReservedSpace_test().
// Each test_reserved_spaceN variant exercises a different ReservedSpace
// constructor and verifies the resulting base address and size.
class TestReservedSpace : AllStatic {
 public:
  // Write one byte per small page of [addr, addr + size) so that a pinned
  // ("special") mapping is actually touched and proven usable.
  static void small_page_write(void* addr, size_t size) {
    size_t page_size = os::vm_page_size();

    char* end = (char*)addr + size;
    for (char* p = (char*)addr; p < end; p += page_size) {
      *p = 1;
    }
  }

  // Release rs's memory with the OS primitive matching how it was obtained:
  // special (pinned/large-page) mappings need os::release_memory_special().
  static void release_memory_for_test(ReservedSpace rs) {
    if (rs.special()) {
      guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
    } else {
      guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
    }
  }

  // Reserve with an explicit alignment (and the global UseLargePages
  // setting); verify base and size are aligned, then release.
  static void test_reserved_space1(size_t size, size_t alignment) {
    test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);

    assert(is_size_aligned(size, alignment), "Incorrect input parameters");

    ReservedSpace rs(size,          // size
                     alignment,     // alignment
                     UseLargePages, // large
                     (char *)NULL); // requested_address

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
    assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }

  // Reserve through the size-only constructor and verify base and size.
  static void test_reserved_space2(size_t size) {
    test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);

    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");

    ReservedSpace rs(size);

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }

  // Reserve with an explicit large-page request (only honored when
  // UseLargePages is on and size covers at least one large page).
  static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
    test_log("test_reserved_space3(%p, %p, %d)",
        (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);

    if (size < alignment) {
      // Tests might set -XX:LargePageSizeInBytes=<small pages> and cause unexpected input arguments for this test.
      assert((size_t)os::vm_page_size() == os::large_page_size(), "Test needs further refinement");
      return;
    }

    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
    assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");

    bool large = maybe_large && UseLargePages && size >= os::large_page_size();

    ReservedSpace rs(size, alignment, large, false);

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }


  // Driver: a few multiples of 2M at allocation granularity.
  static void test_reserved_space1() {
    size_t size = 2 * 1024 * 1024;
    size_t ag   = os::vm_allocation_granularity();

    test_reserved_space1(size,      ag);
    test_reserved_space1(size * 2,  ag);
    test_reserved_space1(size * 10, ag);
  }

  // Driver: sizes around multiples of 2M, +/- one allocation granule.
  static void test_reserved_space2() {
    size_t size = 2 * 1024 * 1024;
    size_t ag = os::vm_allocation_granularity();

    test_reserved_space2(size * 1);
    test_reserved_space2(size * 2);
    test_reserved_space2(size * 10);
    test_reserved_space2(ag);
    test_reserved_space2(size - ag);
    test_reserved_space2(size);
    test_reserved_space2(size + ag);
    test_reserved_space2(size * 2);
    test_reserved_space2(size * 2 - ag);
    test_reserved_space2(size * 2 + ag);
    test_reserved_space2(size * 3);
    test_reserved_space2(size * 3 - ag);
    test_reserved_space2(size * 3 + ag);
    test_reserved_space2(size * 10);
    test_reserved_space2(size * 10 + size / 2);
  }

  // Driver: size/alignment combinations, with and without large pages.
  static void test_reserved_space3() {
    size_t ag = os::vm_allocation_granularity();

    test_reserved_space3(ag,      ag    , false);
    test_reserved_space3(ag * 2,  ag    , false);
    test_reserved_space3(ag * 3,  ag    , false);
    test_reserved_space3(ag * 2,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 2, false);
    test_reserved_space3(ag * 8,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 4, false);
    test_reserved_space3(ag * 8,  ag * 4, false);
    test_reserved_space3(ag * 16, ag * 4, false);

    if (UseLargePages) {
      size_t lp = os::large_page_size();

      // Without large pages
      test_reserved_space3(lp,     ag * 4, false);
      test_reserved_space3(lp * 2, ag * 4, false);
      test_reserved_space3(lp * 4, ag * 4, false);
      test_reserved_space3(lp,     lp    , false);
      test_reserved_space3(lp * 2, lp    , false);
      test_reserved_space3(lp * 3, lp    , false);
      test_reserved_space3(lp * 2, lp * 2, false);
      test_reserved_space3(lp * 4, lp * 2, false);
      test_reserved_space3(lp * 8, lp * 2, false);

      // With large pages
      test_reserved_space3(lp, ag * 4    , true);
      test_reserved_space3(lp * 2, ag * 4, true);
      test_reserved_space3(lp * 4, ag * 4, true);
      test_reserved_space3(lp, lp        , true);
      test_reserved_space3(lp * 2, lp    , true);
      test_reserved_space3(lp * 3, lp    , true);
      test_reserved_space3(lp * 2, lp * 2, true);
      test_reserved_space3(lp * 4, lp * 2, true);
      test_reserved_space3(lp * 8, lp * 2, true);
    }
  }

  // Entry point: run all ReservedSpace tests.
  static void test_reserved_space() {
    test_reserved_space1();
    test_reserved_space2();
    test_reserved_space3();
  }
};
1213 
// External entry point for the internal ReservedSpace tests (invoked by the
// internal-VM-tests driver).
void TestReservedSpace_test() {
  TestReservedSpace::test_reserved_space();
}
1217 
// Test-local assertion helpers for SIZE_FORMAT-sized values.
//
// Each macro is wrapped in do { ... } while (false) so it expands to exactly
// one statement and composes safely with unbraced if/else (the previous
// definitions carried their own trailing ';', leaving an empty statement at
// every call site).  Arguments are parenthesized to guard against
// operator-precedence surprises.  Note: as before, each argument is
// evaluated twice (check + failure message), so arguments must be
// side-effect free.
#define assert_equals(actual, expected)            \
  do {                                             \
    assert((actual) == (expected),                 \
           "Got " SIZE_FORMAT " expected "         \
           SIZE_FORMAT, (actual), (expected));     \
  } while (false)

#define assert_ge(value1, value2)                            \
  do {                                                       \
    assert((value1) >= (value2),                             \
           "'" #value1 "': " SIZE_FORMAT " '"                \
           #value2 "': " SIZE_FORMAT, (value1), (value2));   \
  } while (false)

#define assert_lt(value1, value2)                            \
  do {                                                       \
    assert((value1) < (value2),                              \
           "'" #value1 "': " SIZE_FORMAT " '"                \
           #value2 "': " SIZE_FORMAT, (value1), (value2));   \
  } while (false)
1232 
1233 
// Internal VM tests for VirtualSpace, entered via TestVirtualSpace_test().
// Verifies that actual_committed_size() stays within one commit granule of
// the requested commit size under various large-page configurations.
class TestVirtualSpace : AllStatic {
  // How large pages are used when reserving/initializing the test space:
  //   Default - size-only reservation, default initialize()
  //   Disable - small-page reservation, granularity forced to vm_page_size()
  //   Reserve - size-only reservation, default initialize()
  //   Commit  - small-page reservation, granularity chosen per region size
  enum TestLargePages {
    Default,
    Disable,
    Reserve,
    Commit
  };

  // Reserve memory according to the requested large-page mode.
  static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
    switch(mode) {
    default:
    case Default:
    case Reserve:
      return ReservedSpace(reserve_size_aligned);
    case Disable:
    case Commit:
      return ReservedSpace(reserve_size_aligned,
                           os::vm_allocation_granularity(),
                           /* large */ false, /* exec */ false);
    }
  }

  // Initialize vs over rs with the commit granularity implied by 'mode'.
  static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
    switch(mode) {
    default:
    case Default:
    case Reserve:
      return vs.initialize(rs, 0);
    case Disable:
      return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
    case Commit:
      return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
    }
  }

 public:
  // Reserve, initialize, expand by 'commit_size', then check that
  // actual_committed_size() is at least the request and overshoots by less
  // than one commit granule.
  static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
                                                        TestLargePages mode = Default) {
    size_t granularity = os::vm_allocation_granularity();
    size_t reserve_size_aligned = align_size_up(reserve_size, granularity);

    ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);

    assert(reserved.is_reserved(), "Must be");

    VirtualSpace vs;
    bool initialized = initialize_virtual_space(vs, reserved, mode);
    assert(initialized, "Failed to initialize VirtualSpace");

    vs.expand_by(commit_size, false);

    if (vs.special()) {
      assert_equals(vs.actual_committed_size(), reserve_size_aligned);
    } else {
      assert_ge(vs.actual_committed_size(), commit_size);
      // Approximate the commit granularity.
      // Make sure that we don't commit using large pages
      // if large pages has been disabled for this VirtualSpace.
      size_t commit_granularity = (mode == Disable || !UseLargePages) ?
                                   os::vm_page_size() : os::large_page_size();
      assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
    }

    reserved.release();
  }

  // A space backed by exactly one explicitly-reserved large page must
  // report the whole page as committed after expanding by its size.
  static void test_virtual_space_actual_committed_space_one_large_page() {
    if (!UseLargePages) {
      return;
    }

    size_t large_page_size = os::large_page_size();

    ReservedSpace reserved(large_page_size, large_page_size, true, false);

    assert(reserved.is_reserved(), "Must be");

    VirtualSpace vs;
    bool initialized = vs.initialize(reserved, 0);
    assert(initialized, "Failed to initialize VirtualSpace");

    vs.expand_by(large_page_size, false);

    assert_equals(vs.actual_committed_size(), large_page_size);

    reserved.release();
  }

  // Driver: reserve/commit combinations from 4K up to 10M.
  static void test_virtual_space_actual_committed_space() {
    test_virtual_space_actual_committed_space(4 * K, 0);
    test_virtual_space_actual_committed_space(4 * K, 4 * K);
    test_virtual_space_actual_committed_space(8 * K, 0);
    test_virtual_space_actual_committed_space(8 * K, 4 * K);
    test_virtual_space_actual_committed_space(8 * K, 8 * K);
    test_virtual_space_actual_committed_space(12 * K, 0);
    test_virtual_space_actual_committed_space(12 * K, 4 * K);
    test_virtual_space_actual_committed_space(12 * K, 8 * K);
    test_virtual_space_actual_committed_space(12 * K, 12 * K);
    test_virtual_space_actual_committed_space(64 * K, 0);
    test_virtual_space_actual_committed_space(64 * K, 32 * K);
    test_virtual_space_actual_committed_space(64 * K, 64 * K);
    test_virtual_space_actual_committed_space(2 * M, 0);
    test_virtual_space_actual_committed_space(2 * M, 4 * K);
    test_virtual_space_actual_committed_space(2 * M, 64 * K);
    test_virtual_space_actual_committed_space(2 * M, 1 * M);
    test_virtual_space_actual_committed_space(2 * M, 2 * M);
    test_virtual_space_actual_committed_space(10 * M, 0);
    test_virtual_space_actual_committed_space(10 * M, 4 * K);
    test_virtual_space_actual_committed_space(10 * M, 8 * K);
    test_virtual_space_actual_committed_space(10 * M, 1 * M);
    test_virtual_space_actual_committed_space(10 * M, 2 * M);
    test_virtual_space_actual_committed_space(10 * M, 5 * M);
    test_virtual_space_actual_committed_space(10 * M, 10 * M);
  }

  // Driver: the same commit sizes under each non-default large-page mode.
  static void test_virtual_space_disable_large_pages() {
    if (!UseLargePages) {
      return;
    }
    // These test cases verify that if we force VirtualSpace to disable large pages
    test_virtual_space_actual_committed_space(10 * M, 0, Disable);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);

    test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);

    test_virtual_space_actual_committed_space(10 * M, 0, Commit);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
  }

  // Entry point: run all VirtualSpace tests.
  static void test_virtual_space() {
    test_virtual_space_actual_committed_space();
    test_virtual_space_actual_committed_space_one_large_page();
    test_virtual_space_disable_large_pages();
  }
};
1385 
// External entry point for the internal VirtualSpace tests (invoked by the
// internal-VM-tests driver).
void TestVirtualSpace_test() {
  TestVirtualSpace::test_virtual_space();
}
1389 
1390 #endif // PRODUCT
1391 
1392 #endif