1 /*
   2  * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "logging/log.hpp"
  27 #include "memory/resourceArea.hpp"
  28 #include "memory/virtualspace.hpp"
  29 #include "oops/markOop.hpp"
  30 #include "oops/oop.inline.hpp"
  31 #include "runtime/os.inline.hpp"
  32 #include "services/memTracker.hpp"
  33 #include "utilities/align.hpp"
  34 
  35 // ReservedSpace
  36 
  37 // Dummy constructor
  38 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0), 
  39     _nvdimm_base_nv(NULL), _nvdimm_size(0), _dram_size(0),
  40     _alignment(0), _special(false), _executable(false), _fd_for_heap(-1), _fd_for_nvdimm(-1) {
  41 }
  42 
  43 ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1), _fd_for_nvdimm(-1), 
  44     _nvdimm_base_nv(NULL), _nvdimm_size(0), _dram_size(0) {
  45   bool has_preferred_page_size = preferred_page_size != 0;
  46   // Want to use large pages where possible and pad with small pages.
  47   size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
  48   bool large_pages = page_size != (size_t)os::vm_page_size();
  49   size_t alignment;
  50   if (large_pages && has_preferred_page_size) {
  51     alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
  52     // ReservedSpace initialization requires size to be aligned to the given
  53     // alignment. Align the size up.
  54     size = align_up(size, alignment);
  55   } else {
  56     // Don't force the alignment to be large page aligned,
  57     // since that will waste memory.
  58     alignment = os::vm_allocation_granularity();
  59   }
  60   initialize(size, alignment, large_pages, NULL, false);
  61 }
  62 
  63 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  64                              bool large,
  65                              char* requested_address) : _fd_for_heap(-1), _fd_for_nvdimm(-1), 
  66                              _nvdimm_base_nv(NULL), _nvdimm_size(0), _dram_size(0) {
  67   initialize(size, alignment, large, requested_address, false);
  68 }
  69 
  70 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  71                              bool large,
  72                              bool executable) : _fd_for_heap(-1), _fd_for_nvdimm(-1), 
  73                              _nvdimm_base_nv(NULL), _nvdimm_size(0), _dram_size(0) {
  74   initialize(size, alignment, large, NULL, executable);
  75 }
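
// Typical usage (illustrative sketch, mirroring the unit tests at the end of
// this file): reserve address space up front, then commit lazily through a
// VirtualSpace:
//
//   ReservedSpace rs(2 * M, os::vm_allocation_granularity(), UseLargePages, (char*)NULL);
//   if (rs.is_reserved()) {
//     VirtualSpace vs;
//     vs.initialize(rs, 0);        // nothing committed yet
//     vs.expand_by(1 * M, false);  // commit the first megabyte
//   }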
  76 
  77 // Helper method
  78 static void unmap_or_release_memory(char* base, size_t size, bool is_file_mapped) {
  79   if (is_file_mapped) {
  80     if (!os::unmap_memory(base, size)) {
  81       fatal("os::unmap_memory failed");
  82     }
  83   } else if (!os::release_memory(base, size)) {
  84     fatal("os::release_memory failed");
  85   }
  86 }
  87 
// Helper method: returns true if the memory could not be reserved at
// requested_address (releasing any mapping the OS placed elsewhere), and
// false if no particular address was requested or the request was honored.
  89 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
  90                                            const size_t size, bool special, bool is_file_mapped = false)
  91 {
  92   if (base == requested_address || requested_address == NULL)
  93     return false; // did not fail
  94 
  95   if (base != NULL) {
    // A different reserve address may be acceptable in other cases,
    // but for compressed oops the heap should be at the requested address.
  98     assert(UseCompressedOops, "currently requested address used only for compressed oops");
  99     log_debug(gc, heap, coops)("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address));
 100     // OS ignored requested address. Try different address.
 101     if (special) {
 102       if (!os::release_memory_special(base, size)) {
 103         fatal("os::release_memory_special failed");
 104       }
 105     } else {
 106       unmap_or_release_memory(base, size, is_file_mapped);
 107     }
 108   }
 109   return true;
 110 }
 111 
 112 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
 113                                char* requested_address,
 114                                bool executable) {
 115   const size_t granularity = os::vm_allocation_granularity();
 116   assert((size & (granularity - 1)) == 0,
 117          "size not aligned to os::vm_allocation_granularity()");
 118   assert((alignment & (granularity - 1)) == 0,
 119          "alignment not aligned to os::vm_allocation_granularity()");
 120   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 121          "not a power of 2");
 122 
 123   alignment = MAX2(alignment, (size_t)os::vm_page_size());
 124 
 125   _base = NULL;
 126   _size = 0;
 127   _special = false;
 128   _executable = executable;
 129   _alignment = 0;
 130   _noaccess_prefix = 0;
 131   if (size == 0) {
 132     return;
 133   }
 134 
 135   // If OS doesn't support demand paging for large page memory, we need
 136   // to use reserve_memory_special() to reserve and pin the entire region.
 137   // If there is a backing file directory for this space then whether
 138   // large pages are allocated is up to the filesystem of the backing file.
 139   // So we ignore the UseLargePages flag in this case.
 140   bool special = large && !os::can_commit_large_page_memory();
 141   if (special && _fd_for_heap != -1) {
 142     special = false;
 143     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 144       !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 145       log_debug(gc, heap)("Ignoring UseLargePages since large page support is up to the file system of the backing file for Java heap");
 146     }
 147   }
 148 
 149   char* base = NULL;
 150   char* nvdimm_base = NULL;
 151 
 152   if (special) {
 153 
 154     base = os::reserve_memory_special(size, alignment, requested_address, executable);
 155 
 156     if (base != NULL) {
 157       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
 158         // OS ignored requested address. Try different address.
 159         return;
 160       }
 161       // Check alignment constraints.
 162       assert((uintptr_t) base % alignment == 0,
 163              "Large pages returned a non-aligned address, base: "
 164              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 165              p2i(base), alignment);
 166       _special = true;
 167     } else {
 168       // failed; try to reserve regular memory below
 169       if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 170                             !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 171         log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 172       }
 173     }
 174   }
 175 
 176   if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.
 180 
 181     // If the memory was requested at a particular address, use
 182     // os::attempt_reserve_memory_at() to avoid over mapping something
 183     // important.  If available space is not detected, return NULL.
 184 
 185     if (requested_address != 0) {
 186       base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
 187       if (failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
 188         // OS ignored requested address. Try different address.
 189         base = NULL;
 190       }
 191     } else {
 192       if (_nvdimm_base_nv != NULL && _fd_for_nvdimm != -1) {
 193         base = os::reserve_memory(_dram_size, _nvdimm_base_nv, alignment, _fd_for_heap);
 194       } else {
 195         base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
 196       }
 197     }
 198 
 199     if (base == NULL) return;
 200 
 201     // Check alignment constraints
 202     if ((((size_t)base) & (alignment - 1)) != 0) {
 203       // Base not aligned, retry
 204       unmap_or_release_memory(base, size, _fd_for_heap != -1 /*is_file_mapped*/);
 205 
 206       // Make sure that size is aligned
 207       size = align_up(size, alignment);
 208       base = os::reserve_memory_aligned(size, alignment, _fd_for_heap);
 209 
 210       if (requested_address != 0 &&
 211           failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
 212         // As a result of the alignment constraints, the allocated base differs
 213         // from the requested address. Return back to the caller who can
 214         // take remedial action (like try again without a requested address).
 215         assert(_base == NULL, "should be");
 216         return;
 217       }
 218     }
 219   }
 220   // Done
 221   _base = base;
  _nvdimm_base = _base - _nvdimm_size;
 223   _nvdimm_base_nv = NULL;
 224   _dram_size = (size_t)size;
 225   _size = size;
 226   _alignment = alignment;
  // If the heap is reserved with a backing file, the entire space has been committed, so set the _special flag to true.
 228   if (_fd_for_heap != -1) {
 229     _special = true;
 230   }
 231 }
 232 
 233 
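// Wraps an already-reserved region: this constructor only records the given
// base, size and attributes; it does not reserve any memory itself. It is used
// by first_part() and last_part() below to describe partitions of an existing
// reservation.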
 234 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
 235                              bool special, bool executable) {
 236   assert((size % os::vm_allocation_granularity()) == 0,
 237          "size not allocation aligned");
 238   _base = base;
 239   _size = size;
 240   _nvdimm_base = NULL; 
 241   _nvdimm_base_nv = NULL;
 242   _dram_size = (size_t)size;
 243   _alignment = alignment;
 244   _noaccess_prefix = 0;
 245   _special = special;
 246   _executable = executable;
 247 }
 248 
 249 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
 250                                         bool split, bool realloc) {
 251   assert(partition_size <= size(), "partition failed");
 252   if (split) {
 253     os::split_reserved_memory(base(), size(), partition_size, realloc);
 254   }
 255   ReservedSpace result(base(), partition_size, alignment, special(),
 256                        executable());
 257   return result;
 258 }
 259 
 260 
 261 ReservedSpace
 262 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
 263   assert(partition_size <= size(), "partition failed");
 264   ReservedSpace result(base() + partition_size, size() - partition_size,
 265                        alignment, special(), executable());
 266   return result;
 267 }
 268 
 269 
 270 size_t ReservedSpace::page_align_size_up(size_t size) {
 271   return align_up(size, os::vm_page_size());
 272 }
 273 
 274 
 275 size_t ReservedSpace::page_align_size_down(size_t size) {
 276   return align_down(size, os::vm_page_size());
 277 }
 278 
 279 
 280 size_t ReservedSpace::allocation_align_size_up(size_t size) {
 281   return align_up(size, os::vm_allocation_granularity());
 282 }
 283 
 284 
 285 size_t ReservedSpace::allocation_align_size_down(size_t size) {
 286   return align_down(size, os::vm_allocation_granularity());
 287 }
 288 
 289 
 290 void ReservedSpace::release() {
 291   if (is_reserved()) {
 292     char *real_base = _base - _noaccess_prefix;
 293     const size_t real_size = _size + _noaccess_prefix;
 294     // unmap nvdimm
 295     if (_fd_for_nvdimm != -1) {
      os::unmap_memory(real_base + real_size, _nvdimm_size);
 297     }
 298     if (special()) {
 299       if (_fd_for_heap != -1) {
 300         os::unmap_memory(real_base, real_size);
 301       } else {
 302         os::release_memory_special(real_base, real_size);
 303       }
    } else {
 305       os::release_memory(real_base, real_size);
 306     }
 307     _base = NULL;
 308     _nvdimm_base = NULL;
 309     _nvdimm_base_nv = NULL;
 310     _dram_size = 0;
 311     _nvdimm_size = 0;
 312     _size = 0;
 313     _noaccess_prefix = 0;
 314     _alignment = 0;
 315     _special = false;
 316     _executable = false;
 317   }
 318 }
 319 
 320 static size_t noaccess_prefix_size(size_t alignment) {
 321   return lcm(os::vm_page_size(), alignment);
 322 }
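// For example (illustrative values): with a 4K page size and a 16M heap
// alignment, lcm(4K, 16M) == 16M, so establish_noaccess_prefix() below ends up
// protecting one full alignment unit at the bottom of the reservation.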
 323 
 324 void ReservedHeapSpace::establish_noaccess_prefix() {
 325   assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
 326   _noaccess_prefix = noaccess_prefix_size(_alignment);
 327 
 328   if (base() && base() + _size > (char *)OopEncodingHeapMax) {
 329     if (true
 330         WIN64_ONLY(&& !UseLargePages)
 331         AIX_ONLY(&& os::vm_page_size() != 64*K)) {
 332       // Protect memory at the base of the allocated region.
 333       // If special, the page was committed (only matters on windows)
 334       if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
 335         fatal("cannot protect protection page");
 336       }
 337       log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
 338                                  PTR_FORMAT " / " INTX_FORMAT " bytes",
 339                                  p2i(_base),
 340                                  _noaccess_prefix);
 341       assert(Universe::narrow_oop_use_implicit_null_checks() == true, "not initialized?");
 342     } else {
 343       Universe::set_narrow_oop_use_implicit_null_checks(false);
 344     }
 345   }
 346 
 347   _base += _noaccess_prefix;
 348   _size -= _noaccess_prefix;
 349   assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
 350 }
 351 
 352 // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
 353 // Does not check whether the reserved memory actually is at requested_address, as the memory returned
 354 // might still fulfill the wishes of the caller.
 355 // Assures the memory is aligned to 'alignment'.
// NOTE: If ReservedHeapSpace already points to some reserved memory, it is released first.
 357 void ReservedHeapSpace::try_reserve_heap(size_t size,
 358                                          size_t alignment,
 359                                          bool large,
 360                                          char* requested_address) {
 361   if (_base != NULL) {
 362     // We tried before, but we didn't like the address delivered.
 363     release();
 364   }
 365 
 366   if (_fd_for_nvdimm != -1 && UseG1GC) {
 367     char* base_nv = os::reserve_memory(size, requested_address, alignment);
 368     initialize_g1gc_nvdimm_dram_sizes(size, alignment);
    _nvdimm_base_nv = base_nv + _nvdimm_size; // hint for the allocation address of the DRAM (compressed oops) heap.
 370   }
 371 
 372   // If OS doesn't support demand paging for large page memory, we need
 373   // to use reserve_memory_special() to reserve and pin the entire region.
 374   // If there is a backing file directory for this space then whether
 375   // large pages are allocated is up to the filesystem of the backing file.
 376   // So we ignore the UseLargePages flag in this case.
 377   bool special = large && !os::can_commit_large_page_memory();
 378   if (special && _fd_for_heap != -1) {
 379     special = false;
 380     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 381                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 382       log_debug(gc, heap)("Cannot allocate large pages for Java Heap when AllocateHeapAt option is set.");
 383     }
 384   }
 385   char* base = NULL;
 386   char* nvdimm_base = NULL;
 387 
 388   log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
 389                              " heap of size " SIZE_FORMAT_HEX,
 390                              p2i(requested_address),
 391                              size);
 392 
 393   if (special) {
 394     base = os::reserve_memory_special(size, alignment, requested_address, false);
 395 
 396     if (base != NULL) {
 397       // Check alignment constraints.
 398       assert((uintptr_t) base % alignment == 0,
 399              "Large pages returned a non-aligned address, base: "
 400              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 401              p2i(base), alignment);
 402       _special = true;
 403     }
 404   }
 405 
 406   if (base == NULL) {
 407     // Failed; try to reserve regular memory below
 408     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 409                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 410       log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 411     }
 412 
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.
 416 
 417     // If the memory was requested at a particular address, use
 418     // os::attempt_reserve_memory_at() to avoid over mapping something
 419     // important.  If available space is not detected, return NULL.
 420 
 421     if (requested_address != 0) {
 422       if (_nvdimm_base_nv != NULL && _fd_for_nvdimm != -1) {
 423         // first unmap so that OS does not keep trying.
 424         os::unmap_memory(_nvdimm_base_nv, _dram_size);
 425         base = os::attempt_reserve_memory_at(_dram_size, _nvdimm_base_nv);
 426       } else {
 427         base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
 428       }
 429     } else {
 430       if (_nvdimm_base_nv != NULL && _fd_for_nvdimm != -1) {
 431         // first unmap so that OS does not keep trying.
 432         os::unmap_memory(_nvdimm_base_nv, _dram_size);
 433         base = os::reserve_memory(_dram_size, _nvdimm_base_nv, alignment);
 434       } else {
 435         base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
 436       }
 437     }
 438   }
 439   if (base == NULL) { return; }
 440 
 441   // Done
 442   _base = base;
  _nvdimm_base = _base - _nvdimm_size;
 444   if (_nvdimm_base_nv != NULL && _fd_for_nvdimm != -1) {
 445     _size = _dram_size;
 446   } else {
 447     _size = size;
 448   }
 449   _alignment = alignment;
 450 
  // If the heap is reserved with a backing file, the entire space has been committed, so set the _special flag to true.
 452   if (_fd_for_heap != -1) {
 453     _special = true;
 454   }
 455 
 456   // Check alignment constraints
 457   if ((((size_t)base) & (alignment - 1)) != 0) {
 458     // Base not aligned, retry.
 459     release();
 460   }
 461 }
 462 
 463 void ReservedHeapSpace::try_reserve_range(char *highest_start,
 464                                           char *lowest_start,
 465                                           size_t attach_point_alignment,
 466                                           char *aligned_heap_base_min_address,
 467                                           char *upper_bound,
 468                                           size_t size,
 469                                           size_t alignment,
 470                                           bool large) {
 471   const size_t attach_range = highest_start - lowest_start;
 472   // Cap num_attempts at possible number.
 473   // At least one is possible even for 0 sized attach range.
 474   const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
 475   const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);
 476 
 477   const size_t stepsize = (attach_range == 0) ? // Only one try.
 478     (size_t) highest_start : align_up(attach_range / num_attempts_to_try, attach_point_alignment);
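
  // Example (illustrative values): with HeapSearchSteps == 3 and a 6G attach
  // range aligned to 1G, stepsize is align_up(6G / 3, 1G) == 2G, so the loop
  // below probes highest_start, then 2G lower, and so on down to lowest_start.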
 479 
 480   // Try attach points from top to bottom.
 481   char* attach_point = highest_start;
 482   while (attach_point >= lowest_start  &&
 483          attach_point <= highest_start &&  // Avoid wrap around.
 484          ((_base == NULL) ||
 485           (_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
 486     try_reserve_heap(size, alignment, large, attach_point);
 487     attach_point -= stepsize;
 488   }
 489 }
 490 
 491 #define SIZE_64K  ((uint64_t) UCONST64(      0x10000))
 492 #define SIZE_256M ((uint64_t) UCONST64(   0x10000000))
 493 #define SIZE_32G  ((uint64_t) UCONST64(  0x800000000))
 494 
 495 // Helper for heap allocation. Returns an array with addresses
 496 // (OS-specific) which are suited for disjoint base mode. Array is
 497 // NULL terminated.
 498 static char** get_attach_addresses_for_disjoint_mode() {
 499   static uint64_t addresses[] = {
 500      2 * SIZE_32G,
 501      3 * SIZE_32G,
 502      4 * SIZE_32G,
 503      8 * SIZE_32G,
 504     10 * SIZE_32G,
 505      1 * SIZE_64K * SIZE_32G,
 506      2 * SIZE_64K * SIZE_32G,
 507      3 * SIZE_64K * SIZE_32G,
 508      4 * SIZE_64K * SIZE_32G,
 509     16 * SIZE_64K * SIZE_32G,
 510     32 * SIZE_64K * SIZE_32G,
 511     34 * SIZE_64K * SIZE_32G,
 512     0
 513   };
 514 
  // Skip addresses below OopEncodingHeapMax or HeapBaseMinAddress. This
  // assumes the array is sorted.
 517   uint i = 0;
 518   while (addresses[i] != 0 &&
 519          (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
 520     i++;
 521   }
 522   uint start = i;
 523 
 524   // Avoid more steps than requested.
 525   i = 0;
 526   while (addresses[start+i] != 0) {
 527     if (i == HeapSearchSteps) {
 528       addresses[start+i] = 0;
 529       break;
 530     }
 531     i++;
 532   }
 533 
 534   return (char**) &addresses[start];
 535 }
 536 
 537 void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, bool large) {
 538   guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
            "cannot allocate compressed oop heap for this size");
 540   guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
 541   assert(HeapBaseMinAddress > 0, "sanity");
 542 
 543   const size_t granularity = os::vm_allocation_granularity();
 544   assert((size & (granularity - 1)) == 0,
 545          "size not aligned to os::vm_allocation_granularity()");
 546   assert((alignment & (granularity - 1)) == 0,
 547          "alignment not aligned to os::vm_allocation_granularity()");
 548   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 549          "not a power of 2");
 550 
 551   // The necessary attach point alignment for generated wish addresses.
 552   // This is needed to increase the chance of attaching for mmap and shmat.
 553   const size_t os_attach_point_alignment =
 554     AIX_ONLY(SIZE_256M)  // Known shm boundary alignment.
 555     NOT_AIX(os::vm_allocation_granularity());
 556   const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
 557 
 558   char *aligned_heap_base_min_address = (char *)align_up((void *)HeapBaseMinAddress, alignment);
 559   size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
 560     noaccess_prefix_size(alignment) : 0;
 561 
 562   // Attempt to alloc at user-given address.
 563   if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
 564     try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_heap_base_min_address);
 565     if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
 566       release();
 567     }
 568   }
 569 
 570   // Keep heap at HeapBaseMinAddress.
 571   if (_base == NULL) {
 572 
 573     // Try to allocate the heap at addresses that allow efficient oop compression.
 574     // Different schemes are tried, in order of decreasing optimization potential.
 575     //
 576     // For this, try_reserve_heap() is called with the desired heap base addresses.
 577     // A call into the os layer to allocate at a given address can return memory
 578     // at a different address than requested.  Still, this might be memory at a useful
 579     // address. try_reserve_heap() always returns this allocated memory, as only here
 580     // the criteria for a good heap are checked.
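    //
    // The schemes below are attempted in order of decreasing benefit:
    //   1. unscaled   (no base, no shift)
    //   2. zero-based (no base, scaled)
    //   3. disjoint base
    //   4. any address, with a noaccess prefix for implicit null checks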
 581 
 582     // Attempt to allocate so that we can run without base and scale (32-Bit unscaled compressed oops).
 583     // Give it several tries from top of range to bottom.
 584     if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {
 585 
      // Calculate the address range within which we try to attach (range of possible start addresses).
 587       char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
 588       char* const lowest_start  = align_up(aligned_heap_base_min_address, attach_point_alignment);
 589       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 590                         aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
 591     }
 592 
 593     // zerobased: Attempt to allocate in the lower 32G.
    // But leave room for the compressed class space, which is allocated above
    // the heap.
 596     char *zerobased_max = (char *)OopEncodingHeapMax;
 597     const size_t class_space = align_up(CompressedClassSpaceSize, alignment);
 598     // For small heaps, save some space for compressed class pointer
 599     // space so it can be decoded with no base.
 600     if (UseCompressedClassPointers && !UseSharedSpaces &&
 601         OopEncodingHeapMax <= KlassEncodingMetaspaceMax &&
 602         (uint64_t)(aligned_heap_base_min_address + size + class_space) <= KlassEncodingMetaspaceMax) {
 603       zerobased_max = (char *)OopEncodingHeapMax - class_space;
 604     }
 605 
 606     // Give it several tries from top of range to bottom.
 607     if (aligned_heap_base_min_address + size <= zerobased_max &&    // Zerobased theoretical possible.
 608         ((_base == NULL) ||                        // No previous try succeeded.
 609          (_base + size > zerobased_max))) {        // Unscaled delivered an arbitrary address.
 610 
      // Calculate the address range within which we try to attach (range of possible start addresses).
 612       char *const highest_start = align_down(zerobased_max - size, attach_point_alignment);
 613       // Need to be careful about size being guaranteed to be less
 614       // than UnscaledOopHeapMax due to type constraints.
 615       char *lowest_start = aligned_heap_base_min_address;
 616       uint64_t unscaled_end = UnscaledOopHeapMax - size;
 617       if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
 618         lowest_start = MAX2(lowest_start, (char*)unscaled_end);
 619       }
 620       lowest_start = align_up(lowest_start, attach_point_alignment);
 621       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 622                         aligned_heap_base_min_address, zerobased_max, size, alignment, large);
 623     }
 624 
 625     // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
 626     // implement null checks.
 627     noaccess_prefix = noaccess_prefix_size(alignment);
 628 
    // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjoint base mode.
 630     char** addresses = get_attach_addresses_for_disjoint_mode();
 631     int i = 0;
 632     while (addresses[i] &&                                 // End of array not yet reached.
 633            ((_base == NULL) ||                             // No previous try succeeded.
 634             (_base + size >  (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
 635              !Universe::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
 636       char* const attach_point = addresses[i];
 637       assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
 638       try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
 639       i++;
 640     }
 641 
 642     // Last, desperate try without any placement.
 643     if (_base == NULL) {
 644       log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix);
 645       initialize(size + noaccess_prefix, alignment, large, NULL, false);
 646     }
 647   }
 648 }
 649 
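// Splits the requested heap size between DRAM and NVDIMM: the DRAM part is
// sized from G1MaxNewSizePercent (the maximum young generation share) and the
// remainder is placed on NVDIMM for the old generation. Both parts are rounded
// to page and alignment boundaries.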
 650 void ReservedHeapSpace::initialize_g1gc_nvdimm_dram_sizes(size_t size, size_t alignment) {
  _dram_size = (size_t)((size * G1MaxNewSizePercent) / 100);
  size_t page_sz = os::vm_page_size() - 1;
  _dram_size = (_dram_size + page_sz) & (~page_sz);
 654   // align sizes.
 655   _dram_size = align_down(_dram_size, alignment);
 656   _nvdimm_size = size - _dram_size;
 657   _nvdimm_size = (_nvdimm_size + page_sz) & (~page_sz);
 658   _nvdimm_size = align_down(_nvdimm_size, alignment);
 659 }
 660 
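// Reserves the Java heap. Depending on the flags this reserves a plain heap, a
// heap backed by a file in heap_allocation_directory, or, when AllocateOldGenAt
// is set, a heap whose old generation is placed on an NVDIMM-backed file (for
// G1 the reservation is split into a DRAM part and an NVDIMM part).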
 661 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* heap_allocation_directory) : ReservedSpace() {
 662 
 663   if (size == 0) {
 664     return;
 665   }
 666 
  // If AllocateOldGenAt is set, create the backing file (e.g. on NVDIMM) for the old generation.
 668   if (AllocateOldGenAt != NULL) {
 669     _fd_for_nvdimm = os::create_file_for_heap(AllocateOldGenAt);
 670     if (_fd_for_nvdimm == -1) {
 671       vm_exit_during_initialization(
 672         err_msg("Could not create file for Heap at location %s", AllocateOldGenAt));
 673     }
 674     if (UseParallelOldGC) {
 675       // For ParallelOldGC, adaptive sizing picks _old_gen virtual space sizes as needed.
      // Allocate Xmx on NVDIMM, as adaptive sizing may put a lot of pressure on NVDIMM.
 677       os::allocate_file(_fd_for_nvdimm, MaxHeapSize);
 678       os::set_nvdimm_fd(_fd_for_nvdimm);
 679       os::set_nvdimm_present(true);
 680     }
 681   } else {
 682     _fd_for_nvdimm = -1;
 683   }
 684 
 685   if (heap_allocation_directory != NULL) {
 686     _fd_for_heap = os::create_file_for_heap(heap_allocation_directory);
 687     if (_fd_for_heap == -1) {
 688       vm_exit_during_initialization(
 689         err_msg("Could not create file for Heap at location %s", heap_allocation_directory));
 690     }
 691   }
 692 
 693   // Heap size should be aligned to alignment, too.
 694   guarantee(is_aligned(size, alignment), "set by caller");
 695 
 696   char* base_nv = NULL;
 697   _nvdimm_base_nv = NULL;
 698   
 699   if (_fd_for_nvdimm != -1 && UseG1GC) {
 700     if (!UseCompressedOops) {
      // With compressed oops, the DRAM/NVDIMM split is derived from the
      // requested address in try_reserve_heap() instead.
 702       initialize_g1gc_nvdimm_dram_sizes(size, alignment);
 703       base_nv = os::reserve_memory(size, NULL, alignment);
      _nvdimm_base_nv = base_nv + _nvdimm_size; // hint for allocation address of DRAM heap
 705     }
 706   }
 707 
 708   if (UseCompressedOops) {
 709     initialize_compressed_heap(size, alignment, large);
 710     if (_size > size) {
 711       // We allocated heap with noaccess prefix.
 712       // It can happen we get a zerobased/unscaled heap with noaccess prefix,
 713       // if we had to try at arbitrary address.
 714       establish_noaccess_prefix();
 715     }
 716   } else {
 717     if (_fd_for_nvdimm != -1 && UseG1GC) {
 718       initialize(_dram_size, alignment, large, NULL, false);
 719     } else {
 720       initialize(size, alignment, large, NULL, false);
 721     }
 722   }
 723 
 724   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
 725          "area must be distinguishable from marks for mark-sweep");
 726   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
 727          "area must be distinguishable from marks for mark-sweep");
 728 
 729   if (base() != NULL) {
 730     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
 731     if (_fd_for_nvdimm != -1 && UseG1GC) {
 732       os::set_nvdimm_present(true);
 733       os::set_dram_heapbase((address)_base);
 734       os::set_nvdimm_heapbase((address)_nvdimm_base);
 735       os::set_nvdimm_fd(_fd_for_nvdimm);
 736       _size += _nvdimm_size;
 737       _base = _nvdimm_base;
      log_info(gc, heap)("Java DRAM Heap at [%p - %p] & NVDIMM Old Gen at [%p - %p] " SIZE_FORMAT,
                         _nvdimm_base + _nvdimm_size, (char*)(_nvdimm_base + _nvdimm_size + _dram_size),
                         _nvdimm_base, (char*)(_nvdimm_base + _nvdimm_size), size);
 739     }
 740   }
 741 
 742   if (_fd_for_heap != -1) {
 743     os::close(_fd_for_heap);
 744   }
 745 }
 746 
// Reserve space for the code segment.  Same as the Java heap, only we mark
// this as executable.
 749 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
 750                                      size_t rs_align,
 751                                      bool large) :
 752   ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
 753   MemTracker::record_virtual_memory_type((address)base(), mtCode);
 754 }
 755 
 756 // VirtualSpace
 757 
 758 VirtualSpace::VirtualSpace() {
 759   _low_boundary           = NULL;
 760   _high_boundary          = NULL;
 761   _low                    = NULL;
 762   _high                   = NULL;
 763   _lower_high             = NULL;
 764   _middle_high            = NULL;
 765   _upper_high             = NULL;
 766   _lower_high_boundary    = NULL;
 767   _middle_high_boundary   = NULL;
 768   _upper_high_boundary    = NULL;
 769   _lower_alignment        = 0;
 770   _middle_alignment       = 0;
 771   _upper_alignment        = 0;
 772   _special                = false;
 773   _executable             = false;
 774 }
 775 
 776 
 777 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
 778   const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
 779   return initialize_with_granularity(rs, committed_size, max_commit_granularity);
 780 }
 781 
 782 bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
  if (!rs.is_reserved()) return false;  // allocation failed.
 784   assert(_low_boundary == NULL, "VirtualSpace already initialized");
 785   assert(max_commit_granularity > 0, "Granularity must be non-zero.");
 786 
 787   _low_boundary  = rs.base();
 788   _high_boundary = low_boundary() + rs.size();
 789 
 790   _low = low_boundary();
 791   _high = low();
 792 
 793   _special = rs.special();
 794   _executable = rs.executable();
 795 
 796   // When a VirtualSpace begins life at a large size, make all future expansion
 797   // and shrinking occur aligned to a granularity of large pages.  This avoids
 798   // fragmentation of physical addresses that inhibits the use of large pages
 799   // by the OS virtual memory system.  Empirically,  we see that with a 4MB
 800   // page size, the only spaces that get handled this way are codecache and
 801   // the heap itself, both of which provide a substantial performance
 802   // boost in many benchmarks when covered by large pages.
 803   //
 804   // No attempt is made to force large page alignment at the very top and
 805   // bottom of the space if they are not aligned so already.
 806   _lower_alignment  = os::vm_page_size();
 807   _middle_alignment = max_commit_granularity;
 808   _upper_alignment  = os::vm_page_size();
 809 
 810   // End of each region
 811   _lower_high_boundary = align_up(low_boundary(), middle_alignment());
 812   _middle_high_boundary = align_down(high_boundary(), middle_alignment());
 813   _upper_high_boundary = high_boundary();
 814 
 815   // High address of each region
 816   _lower_high = low_boundary();
 817   _middle_high = lower_high_boundary();
 818   _upper_high = middle_high_boundary();
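
  // Resulting region layout (only the middle region commits with the large
  // granularity):
  //   [low_boundary, lower_high_boundary)          lower region, page-sized commits
  //   [lower_high_boundary, middle_high_boundary)  middle region, max_commit_granularity commits
  //   [middle_high_boundary, high_boundary)        upper region, page-sized commits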
 819 
 820   // commit to initial size
 821   if (committed_size > 0) {
 822     if (!expand_by(committed_size)) {
 823       return false;
 824     }
 825   }
 826   return true;
 827 }
 828 
 829 
 830 VirtualSpace::~VirtualSpace() {
 831   release();
 832 }
 833 
 834 
 835 void VirtualSpace::release() {
 836   // This does not release memory it reserved.
 837   // Caller must release via rs.release();
 838   _low_boundary           = NULL;
 839   _high_boundary          = NULL;
 840   _low                    = NULL;
 841   _high                   = NULL;
 842   _lower_high             = NULL;
 843   _middle_high            = NULL;
 844   _upper_high             = NULL;
 845   _lower_high_boundary    = NULL;
 846   _middle_high_boundary   = NULL;
 847   _upper_high_boundary    = NULL;
 848   _lower_alignment        = 0;
 849   _middle_alignment       = 0;
 850   _upper_alignment        = 0;
 851   _special                = false;
 852   _executable             = false;
 853 }
 854 
 855 
 856 size_t VirtualSpace::committed_size() const {
 857   return pointer_delta(high(), low(), sizeof(char));
 858 }
 859 
 860 
 861 size_t VirtualSpace::reserved_size() const {
 862   return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
 863 }
 864 
 865 
 866 size_t VirtualSpace::uncommitted_size()  const {
 867   return reserved_size() - committed_size();
 868 }
 869 
 870 size_t VirtualSpace::actual_committed_size() const {
 871   // Special VirtualSpaces commit all reserved space up front.
 872   if (special()) {
 873     return reserved_size();
 874   }
 875 
 876   size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
 877   size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
 878   size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));
 879 
 880 #ifdef ASSERT
 881   size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
 882   size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
 883   size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));
 884 
 885   if (committed_high > 0) {
 886     assert(committed_low == lower, "Must be");
 887     assert(committed_middle == middle, "Must be");
 888   }
 889 
 890   if (committed_middle > 0) {
 891     assert(committed_low == lower, "Must be");
 892   }
 893   if (committed_middle < middle) {
 894     assert(committed_high == 0, "Must be");
 895   }
 896 
 897   if (committed_low < lower) {
 898     assert(committed_high == 0, "Must be");
 899     assert(committed_middle == 0, "Must be");
 900   }
 901 #endif
 902 
 903   return committed_low + committed_middle + committed_high;
 904 }
 905 
 906 
 907 bool VirtualSpace::contains(const void* p) const {
 908   return low() <= (const char*) p && (const char*) p < high();
 909 }
 910 
 911 static void pretouch_expanded_memory(void* start, void* end) {
 912   assert(is_aligned(start, os::vm_page_size()), "Unexpected alignment");
 913   assert(is_aligned(end,   os::vm_page_size()), "Unexpected alignment");
 914 
 915   os::pretouch_memory(start, end);
 916 }
 917 
 918 static bool commit_expanded(char* start, size_t size, size_t alignment, bool pre_touch, bool executable) {
 919   if (os::commit_memory(start, size, alignment, executable)) {
 920     if (pre_touch || AlwaysPreTouch) {
 921       pretouch_expanded_memory(start, start + size);
 922     }
 923     return true;
 924   }
 925 
 926   debug_only(warning(
 927       "INFO: os::commit_memory(" PTR_FORMAT ", " PTR_FORMAT
 928       " size=" SIZE_FORMAT ", executable=%d) failed",
 929       p2i(start), p2i(start + size), size, executable);)
 930 
 931   return false;
 932 }
 933 
/*
   First we need to determine if a particular virtual space is using large
   pages.  This is done in the initialize function and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages.  Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks.  If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
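// Worked example (illustrative values): with a 4K page size, a 2M commit
// granularity, high() currently at lower_high_boundary() and a middle region
// larger than 4M, expand_by(3M) commits nothing extra in the lower region,
// commits 4M in the middle region (3M rounded up to the 2M granularity), and
// advances high() by exactly 3M.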
 946 bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
 947   if (uncommitted_size() < bytes) {
 948     return false;
 949   }
 950 
 951   if (special()) {
 952     // don't commit memory if the entire space is pinned in memory
 953     _high += bytes;
 954     return true;
 955   }
 956 
 957   char* previous_high = high();
 958   char* unaligned_new_high = high() + bytes;
 959   assert(unaligned_new_high <= high_boundary(), "cannot expand by more than upper boundary");
 960 
 961   // Calculate where the new high for each of the regions should be.  If
 962   // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
 963   // then the unaligned lower and upper new highs would be the
 964   // lower_high() and upper_high() respectively.
 965   char* unaligned_lower_new_high =  MIN2(unaligned_new_high, lower_high_boundary());
 966   char* unaligned_middle_new_high = MIN2(unaligned_new_high, middle_high_boundary());
 967   char* unaligned_upper_new_high =  MIN2(unaligned_new_high, upper_high_boundary());
 968 
  // Align the new highs based on the regions' alignment.  lower and upper
 970   // alignment will always be default page size.  middle alignment will be
 971   // LargePageSizeInBytes if the actual size of the virtual space is in
 972   // fact larger than LargePageSizeInBytes.
 973   char* aligned_lower_new_high =  align_up(unaligned_lower_new_high, lower_alignment());
 974   char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
 975   char* aligned_upper_new_high =  align_up(unaligned_upper_new_high, upper_alignment());
 976 
 977   // Determine which regions need to grow in this expand_by call.
 978   // If you are growing in the lower region, high() must be in that
 979   // region so calculate the size based on high().  For the middle and
 980   // upper regions, determine the starting point of growth based on the
 981   // location of high().  By getting the MAX of the region's low address
 982   // (or the previous region's high address) and high(), we can tell if it
 983   // is an intra or inter region growth.
 984   size_t lower_needs = 0;
 985   if (aligned_lower_new_high > lower_high()) {
 986     lower_needs = pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
 987   }
 988   size_t middle_needs = 0;
 989   if (aligned_middle_new_high > middle_high()) {
 990     middle_needs = pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
 991   }
 992   size_t upper_needs = 0;
 993   if (aligned_upper_new_high > upper_high()) {
 994     upper_needs = pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
 995   }
 996 
 997   // Check contiguity.
 998   assert(low_boundary() <= lower_high() && lower_high() <= lower_high_boundary(),
 999          "high address must be contained within the region");
1000   assert(lower_high_boundary() <= middle_high() && middle_high() <= middle_high_boundary(),
1001          "high address must be contained within the region");
1002   assert(middle_high_boundary() <= upper_high() && upper_high() <= upper_high_boundary(),
1003          "high address must be contained within the region");
1004 
1005   // Commit regions
1006   if (lower_needs > 0) {
1007     assert(lower_high() + lower_needs <= lower_high_boundary(), "must not expand beyond region");
1008     if (!commit_expanded(lower_high(), lower_needs, _lower_alignment, pre_touch, _executable)) {
1009       return false;
1010     }
1011     _lower_high += lower_needs;
1012   }
1013 
1014   if (middle_needs > 0) {
1015     assert(middle_high() + middle_needs <= middle_high_boundary(), "must not expand beyond region");
1016     if (!commit_expanded(middle_high(), middle_needs, _middle_alignment, pre_touch, _executable)) {
1017       return false;
1018     }
1019     _middle_high += middle_needs;
1020   }
1021 
1022   if (upper_needs > 0) {
1023     assert(upper_high() + upper_needs <= upper_high_boundary(), "must not expand beyond region");
1024     if (!commit_expanded(upper_high(), upper_needs, _upper_alignment, pre_touch, _executable)) {
1025       return false;
1026     }
1027     _upper_high += upper_needs;
1028   }
1029 
1030   _high += bytes;
1031   return true;
1032 }
1033 
// A page is uncommitted if the contents of the entire page are deemed unusable.
1035 // Continue to decrement the high() pointer until it reaches a page boundary
1036 // in which case that particular page can now be uncommitted.
1037 void VirtualSpace::shrink_by(size_t size) {
1038   if (committed_size() < size)
1039     fatal("Cannot shrink virtual space to negative size");
1040 
1041   if (special()) {
1042     // don't uncommit if the entire space is pinned in memory
1043     _high -= size;
1044     return;
1045   }
1046 
1047   char* unaligned_new_high = high() - size;
1048   assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");
1049 
1050   // Calculate new unaligned address
1051   char* unaligned_upper_new_high =
1052     MAX2(unaligned_new_high, middle_high_boundary());
1053   char* unaligned_middle_new_high =
1054     MAX2(unaligned_new_high, lower_high_boundary());
1055   char* unaligned_lower_new_high =
1056     MAX2(unaligned_new_high, low_boundary());
1057 
1058   // Align address to region's alignment
1059   char* aligned_upper_new_high =  align_up(unaligned_upper_new_high, upper_alignment());
1060   char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
1061   char* aligned_lower_new_high =  align_up(unaligned_lower_new_high, lower_alignment());
1062 
1063   // Determine which regions need to shrink
1064   size_t upper_needs = 0;
1065   if (aligned_upper_new_high < upper_high()) {
1066     upper_needs =
1067       pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
1068   }
1069   size_t middle_needs = 0;
1070   if (aligned_middle_new_high < middle_high()) {
1071     middle_needs =
1072       pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
1073   }
1074   size_t lower_needs = 0;
1075   if (aligned_lower_new_high < lower_high()) {
1076     lower_needs =
1077       pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
1078   }
1079 
1080   // Check contiguity.
1081   assert(middle_high_boundary() <= upper_high() &&
1082          upper_high() <= upper_high_boundary(),
1083          "high address must be contained within the region");
1084   assert(lower_high_boundary() <= middle_high() &&
1085          middle_high() <= middle_high_boundary(),
1086          "high address must be contained within the region");
1087   assert(low_boundary() <= lower_high() &&
1088          lower_high() <= lower_high_boundary(),
1089          "high address must be contained within the region");
1090 
1091   // Uncommit
1092   if (upper_needs > 0) {
1093     assert(middle_high_boundary() <= aligned_upper_new_high &&
1094            aligned_upper_new_high + upper_needs <= upper_high_boundary(),
1095            "must not shrink beyond region");
1096     if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
1097       debug_only(warning("os::uncommit_memory failed"));
1098       return;
1099     } else {
1100       _upper_high -= upper_needs;
1101     }
1102   }
1103   if (middle_needs > 0) {
1104     assert(lower_high_boundary() <= aligned_middle_new_high &&
1105            aligned_middle_new_high + middle_needs <= middle_high_boundary(),
1106            "must not shrink beyond region");
1107     if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
1108       debug_only(warning("os::uncommit_memory failed"));
1109       return;
1110     } else {
1111       _middle_high -= middle_needs;
1112     }
1113   }
1114   if (lower_needs > 0) {
1115     assert(low_boundary() <= aligned_lower_new_high &&
1116            aligned_lower_new_high + lower_needs <= lower_high_boundary(),
1117            "must not shrink beyond region");
1118     if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
1119       debug_only(warning("os::uncommit_memory failed"));
1120       return;
1121     } else {
1122       _lower_high -= lower_needs;
1123     }
1124   }
1125 
1126   _high -= size;
1127 }
1128 
1129 #ifndef PRODUCT
1130 void VirtualSpace::check_for_contiguity() {
1131   // Check contiguity.
1132   assert(low_boundary() <= lower_high() &&
1133          lower_high() <= lower_high_boundary(),
1134          "high address must be contained within the region");
1135   assert(lower_high_boundary() <= middle_high() &&
1136          middle_high() <= middle_high_boundary(),
1137          "high address must be contained within the region");
1138   assert(middle_high_boundary() <= upper_high() &&
1139          upper_high() <= upper_high_boundary(),
1140          "high address must be contained within the region");
1141   assert(low() >= low_boundary(), "low");
1142   assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
1143   assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
1144   assert(high() <= upper_high(), "upper high");
1145 }
1146 
1147 void VirtualSpace::print_on(outputStream* out) {
1148   out->print   ("Virtual space:");
1149   if (special()) out->print(" (pinned in memory)");
1150   out->cr();
1151   out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
1152   out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
1153   out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low()), p2i(high()));
1154   out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low_boundary()), p2i(high_boundary()));
1155 }
1156 
1157 void VirtualSpace::print() {
1158   print_on(tty);
1159 }
1160 
1161 /////////////// Unit tests ///////////////
1162 
1163 #ifndef PRODUCT
1164 
1165 #define test_log(...) \
1166   do {\
1167     if (VerboseInternalVMTests) { \
1168       tty->print_cr(__VA_ARGS__); \
1169       tty->flush(); \
1170     }\
1171   } while (false)
1172 
1173 class TestReservedSpace : AllStatic {
1174  public:
1175   static void small_page_write(void* addr, size_t size) {
1176     size_t page_size = os::vm_page_size();
1177 
1178     char* end = (char*)addr + size;
1179     for (char* p = (char*)addr; p < end; p += page_size) {
1180       *p = 1;
1181     }
1182   }
1183 
1184   static void release_memory_for_test(ReservedSpace rs) {
1185     if (rs.special()) {
1186       guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
1187     } else {
1188       guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
1189     }
1190   }
1191 
1192   static void test_reserved_space1(size_t size, size_t alignment) {
1193     test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
1194 
1195     assert(is_aligned(size, alignment), "Incorrect input parameters");
1196 
1197     ReservedSpace rs(size,          // size
1198                      alignment,     // alignment
1199                      UseLargePages, // large
1200                      (char *)NULL); // requested_address
1201 
1202     test_log(" rs.special() == %d", rs.special());
1203 
1204     assert(rs.base() != NULL, "Must be");
1205     assert(rs.size() == size, "Must be");
1206 
1207     assert(is_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
1208     assert(is_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
1209 
1210     if (rs.special()) {
1211       small_page_write(rs.base(), size);
1212     }
1213 
1214     release_memory_for_test(rs);
1215   }
1216 
1217   static void test_reserved_space2(size_t size) {
1218     test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
1219 
1220     assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1221 
1222     ReservedSpace rs(size);
1223 
1224     test_log(" rs.special() == %d", rs.special());
1225 
1226     assert(rs.base() != NULL, "Must be");
1227     assert(rs.size() == size, "Must be");
1228 
1229     if (rs.special()) {
1230       small_page_write(rs.base(), size);
1231     }
1232 
1233     release_memory_for_test(rs);
1234   }
1235 
1236   static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
1237     test_log("test_reserved_space3(%p, %p, %d)",
1238         (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);
1239 
1240     if (size < alignment) {
1241       // Tests might set -XX:LargePageSizeInBytes=<small pages> and cause unexpected input arguments for this test.
1242       assert((size_t)os::vm_page_size() == os::large_page_size(), "Test needs further refinement");
1243       return;
1244     }
1245 
1246     assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1247     assert(is_aligned(size, alignment), "Must be at least aligned against alignment");
1248 
1249     bool large = maybe_large && UseLargePages && size >= os::large_page_size();
1250 
1251     ReservedSpace rs(size, alignment, large, false);
1252 
1253     test_log(" rs.special() == %d", rs.special());
1254 
1255     assert(rs.base() != NULL, "Must be");
1256     assert(rs.size() == size, "Must be");
1257 
1258     if (rs.special()) {
1259       small_page_write(rs.base(), size);
1260     }
1261 
1262     release_memory_for_test(rs);
1263   }
1264 
1265 
1266   static void test_reserved_space1() {
1267     size_t size = 2 * 1024 * 1024;
1268     size_t ag   = os::vm_allocation_granularity();
1269 
1270     test_reserved_space1(size,      ag);
1271     test_reserved_space1(size * 2,  ag);
1272     test_reserved_space1(size * 10, ag);
1273   }
1274 
1275   static void test_reserved_space2() {
1276     size_t size = 2 * 1024 * 1024;
1277     size_t ag = os::vm_allocation_granularity();
1278 
1279     test_reserved_space2(size * 1);
1280     test_reserved_space2(size * 2);
1281     test_reserved_space2(size * 10);
1282     test_reserved_space2(ag);
1283     test_reserved_space2(size - ag);
1284     test_reserved_space2(size);
1285     test_reserved_space2(size + ag);
1286     test_reserved_space2(size * 2);
1287     test_reserved_space2(size * 2 - ag);
1288     test_reserved_space2(size * 2 + ag);
1289     test_reserved_space2(size * 3);
1290     test_reserved_space2(size * 3 - ag);
1291     test_reserved_space2(size * 3 + ag);
1292     test_reserved_space2(size * 10);
1293     test_reserved_space2(size * 10 + size / 2);
1294   }
1295 
  static void test_reserved_space3() {
    size_t ag = os::vm_allocation_granularity();

    test_reserved_space3(ag,      ag    , false);
    test_reserved_space3(ag * 2,  ag    , false);
    test_reserved_space3(ag * 3,  ag    , false);
    test_reserved_space3(ag * 2,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 2, false);
    test_reserved_space3(ag * 8,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 4, false);
    test_reserved_space3(ag * 8,  ag * 4, false);
    test_reserved_space3(ag * 16, ag * 4, false);

    if (UseLargePages) {
      size_t lp = os::large_page_size();

      // Without large pages
      test_reserved_space3(lp,     ag * 4, false);
      test_reserved_space3(lp * 2, ag * 4, false);
      test_reserved_space3(lp * 4, ag * 4, false);
      test_reserved_space3(lp,     lp    , false);
      test_reserved_space3(lp * 2, lp    , false);
      test_reserved_space3(lp * 3, lp    , false);
      test_reserved_space3(lp * 2, lp * 2, false);
      test_reserved_space3(lp * 4, lp * 2, false);
      test_reserved_space3(lp * 8, lp * 2, false);

      // With large pages
      test_reserved_space3(lp, ag * 4    , true);
      test_reserved_space3(lp * 2, ag * 4, true);
      test_reserved_space3(lp * 4, ag * 4, true);
      test_reserved_space3(lp, lp        , true);
      test_reserved_space3(lp * 2, lp    , true);
      test_reserved_space3(lp * 3, lp    , true);
      test_reserved_space3(lp * 2, lp * 2, true);
      test_reserved_space3(lp * 4, lp * 2, true);
      test_reserved_space3(lp * 8, lp * 2, true);
    }
  }

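  // Run all ReservedSpace tests.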
  static void test_reserved_space() {
    test_reserved_space1();
    test_reserved_space2();
    test_reserved_space3();
  }
};

void TestReservedSpace_test() {
  TestReservedSpace::test_reserved_space();
}

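// Assertion helpers for the VirtualSpace tests below; they print both
// values on failure.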
#define assert_equals(actual, expected)  \
  assert(actual == expected,             \
         "Got " SIZE_FORMAT " expected " \
         SIZE_FORMAT, actual, expected);

#define assert_ge(value1, value2)                  \
  assert(value1 >= value2,                         \
         "'" #value1 "': " SIZE_FORMAT " '"        \
         #value2 "': " SIZE_FORMAT, value1, value2);

#define assert_lt(value1, value2)                  \
  assert(value1 < value2,                          \
         "'" #value1 "': " SIZE_FORMAT " '"        \
         #value2 "': " SIZE_FORMAT, value1, value2);

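// Tests for VirtualSpace, focusing on how much memory is actually committed
// with and without large pages.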
class TestVirtualSpace : AllStatic {
  enum TestLargePages {
    Default,
    Disable,
    Reserve,
    Commit
  };

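  // Reserve memory for the given mode: Default and Reserve use the plain
  // constructor (which may pick large pages), while Disable and Commit
  // explicitly reserve with small pages only.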
  static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
    switch(mode) {
    default:
    case Default:
    case Reserve:
      return ReservedSpace(reserve_size_aligned);
    case Disable:
    case Commit:
      return ReservedSpace(reserve_size_aligned,
                           os::vm_allocation_granularity(),
                           /* large */ false, /* exec */ false);
    }
  }

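  // Initialize the VirtualSpace for the given mode: Disable forces small-page
  // commit granularity, Commit uses the page size chosen for the region, and
  // Default/Reserve use VirtualSpace's own default.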
  static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
    switch(mode) {
    default:
    case Default:
    case Reserve:
      return vs.initialize(rs, 0);
    case Disable:
      return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
    case Commit:
      return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
    }
  }

 public:
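  // Reserve reserve_size (rounded up to the allocation granularity), commit
  // commit_size and check actual_committed_size(): it must match the whole
  // reservation for special (pre-committed) mappings, and otherwise lie
  // within one commit granule above commit_size.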
  static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
                                                        TestLargePages mode = Default) {
    size_t granularity = os::vm_allocation_granularity();
    size_t reserve_size_aligned = align_up(reserve_size, granularity);

    ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);

    assert(reserved.is_reserved(), "Must be");

    VirtualSpace vs;
    bool initialized = initialize_virtual_space(vs, reserved, mode);
    assert(initialized, "Failed to initialize VirtualSpace");

    vs.expand_by(commit_size, false);

    if (vs.special()) {
      assert_equals(vs.actual_committed_size(), reserve_size_aligned);
    } else {
      assert_ge(vs.actual_committed_size(), commit_size);
      // Approximate the commit granularity.
      // Make sure that we don't commit using large pages
      // if large pages have been disabled for this VirtualSpace.
      size_t commit_granularity = (mode == Disable || !UseLargePages) ?
                                   os::vm_page_size() : os::large_page_size();
      assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
    }

    reserved.release();
  }

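  // Reserve and commit exactly one large page and verify that exactly that
  // much memory is reported as committed.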
  static void test_virtual_space_actual_committed_space_one_large_page() {
    if (!UseLargePages) {
      return;
    }

    size_t large_page_size = os::large_page_size();

    ReservedSpace reserved(large_page_size, large_page_size, true, false);

    assert(reserved.is_reserved(), "Must be");

    VirtualSpace vs;
    bool initialized = vs.initialize(reserved, 0);
    assert(initialized, "Failed to initialize VirtualSpace");

    vs.expand_by(large_page_size, false);

    assert_equals(vs.actual_committed_size(), large_page_size);

    reserved.release();
  }

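  // Sweep reserve/commit size combinations from 4K up to 10M.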
  static void test_virtual_space_actual_committed_space() {
    test_virtual_space_actual_committed_space(4 * K, 0);
    test_virtual_space_actual_committed_space(4 * K, 4 * K);
    test_virtual_space_actual_committed_space(8 * K, 0);
    test_virtual_space_actual_committed_space(8 * K, 4 * K);
    test_virtual_space_actual_committed_space(8 * K, 8 * K);
    test_virtual_space_actual_committed_space(12 * K, 0);
    test_virtual_space_actual_committed_space(12 * K, 4 * K);
    test_virtual_space_actual_committed_space(12 * K, 8 * K);
    test_virtual_space_actual_committed_space(12 * K, 12 * K);
    test_virtual_space_actual_committed_space(64 * K, 0);
    test_virtual_space_actual_committed_space(64 * K, 32 * K);
    test_virtual_space_actual_committed_space(64 * K, 64 * K);
    test_virtual_space_actual_committed_space(2 * M, 0);
    test_virtual_space_actual_committed_space(2 * M, 4 * K);
    test_virtual_space_actual_committed_space(2 * M, 64 * K);
    test_virtual_space_actual_committed_space(2 * M, 1 * M);
    test_virtual_space_actual_committed_space(2 * M, 2 * M);
    test_virtual_space_actual_committed_space(10 * M, 0);
    test_virtual_space_actual_committed_space(10 * M, 4 * K);
    test_virtual_space_actual_committed_space(10 * M, 8 * K);
    test_virtual_space_actual_committed_space(10 * M, 1 * M);
    test_virtual_space_actual_committed_space(10 * M, 2 * M);
    test_virtual_space_actual_committed_space(10 * M, 5 * M);
    test_virtual_space_actual_committed_space(10 * M, 10 * M);
  }

  static void test_virtual_space_disable_large_pages() {
    if (!UseLargePages) {
      return;
    }
    // These test cases verify that committing falls back to small pages when
    // VirtualSpace is forced to disable large pages.
    test_virtual_space_actual_committed_space(10 * M, 0, Disable);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);

    test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);

    test_virtual_space_actual_committed_space(10 * M, 0, Commit);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
  }

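  // Run all VirtualSpace tests.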
  static void test_virtual_space() {
    test_virtual_space_actual_committed_space();
    test_virtual_space_actual_committed_space_one_large_page();
    test_virtual_space_disable_large_pages();
  }
};

void TestVirtualSpace_test() {
  TestVirtualSpace::test_virtual_space();
}

#endif // PRODUCT

#endif