1 /*
   2  * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "logging/log.hpp"
  27 #include "memory/resourceArea.hpp"
  28 #include "memory/virtualspace.hpp"
  29 #include "oops/markOop.hpp"
  30 #include "oops/oop.inline.hpp"
  31 #include "runtime/os.inline.hpp"
  32 #include "services/memTracker.hpp"
  33 #include "utilities/align.hpp"
  34 
  35 // ReservedSpace
  36 
  37 // Dummy constructor
  38 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0), 
  39     _nvdimm_base_nv(NULL), _nvdimm_size(0), _dram_size(0),
  40     _alignment(0), _special(false), _executable(false), _fd_for_heap(-1) {
  41 }
  42 
  43 ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1), 
  44     _nvdimm_base_nv(NULL), _nvdimm_size(0), _dram_size(0) {
  45   bool has_preferred_page_size = preferred_page_size != 0;
  46   // Want to use large pages where possible and pad with small pages.
  47   size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
  48   bool large_pages = page_size != (size_t)os::vm_page_size();
  49   size_t alignment;
  50   if (large_pages && has_preferred_page_size) {
  51     alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
  52     // ReservedSpace initialization requires size to be aligned to the given
  53     // alignment. Align the size up.
  54     size = align_up(size, alignment);
  55   } else {
  56     // Don't force the alignment to be large page aligned,
  57     // since that will waste memory.
  58     alignment = os::vm_allocation_granularity();
  59   }
  60   initialize(size, alignment, large_pages, NULL, false);
  61 }
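     // Illustrative use (a sketch, not taken from an actual caller): a caller
     // that prefers a particular large page size can request it and let the
     // reservation be padded with small pages where the preference cannot be
     // honored, e.g.
     //   ReservedSpace rs(48*M, os::large_page_size());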
  62 
  63 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  64                              bool large,
  65                              char* requested_address) : _fd_for_heap(-1), 
  66                              _nvdimm_base_nv(NULL), _nvdimm_size(0), _dram_size(0) {
  67   initialize(size, alignment, large, requested_address, false);
  68 }
  69 
  70 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  71                              bool large,
  72                              bool executable) : _fd_for_heap(-1), 
  73                              _nvdimm_base_nv(NULL), _nvdimm_size(0), _dram_size(0) {
  74   initialize(size, alignment, large, NULL, executable);
  75 }
  76 
  77 // Helper method: file-backed mappings must be unmapped, anonymous reservations released.
  78 static void unmap_or_release_memory(char* base, size_t size, bool is_file_mapped) {
  79   if (is_file_mapped) {
  80     if (!os::unmap_memory(base, size)) {
  81       fatal("os::unmap_memory failed");
  82     }
  83   } else if (!os::release_memory(base, size)) {
  84     fatal("os::release_memory failed");
  85   }
  86 }
  87 
  88 // Helper method.
  89 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
  90                                            const size_t size, bool special, bool is_file_mapped = false)
  91 {
  92   if (base == requested_address || requested_address == NULL)
  93     return false; // did not fail
  94 
  95   if (base != NULL) {
  96     // A different reserve address may be acceptable in other cases,
  97     // but for compressed oops the heap should be at the requested address.
  98     assert(UseCompressedOops, "currently requested address used only for compressed oops");
  99     log_debug(gc, heap, coops)("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address));
 100     // OS ignored requested address. Try different address.
 101     if (special) {
 102       if (!os::release_memory_special(base, size)) {
 103         fatal("os::release_memory_special failed");
 104       }
 105     } else {
 106       unmap_or_release_memory(base, size, is_file_mapped);
 107     }
 108   }
 109   return true;
 110 }
 111 
 112 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
 113                                char* requested_address,
 114                                bool executable) {
 115   const size_t granularity = os::vm_allocation_granularity();
 116   assert((size & (granularity - 1)) == 0,
 117          "size not aligned to os::vm_allocation_granularity()");
 118   assert((alignment & (granularity - 1)) == 0,
 119          "alignment not aligned to os::vm_allocation_granularity()");
 120   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 121          "not a power of 2");
 122 
 123   alignment = MAX2(alignment, (size_t)os::vm_page_size());
 124 
 125   _base = NULL;
 126   _size = 0;
 127   _special = false;
 128   _executable = executable;
 129   _alignment = 0;
 130   _noaccess_prefix = 0;
 131   if (size == 0) {
 132     return;
 133   }
 134 
 135   // If OS doesn't support demand paging for large page memory, we need
 136   // to use reserve_memory_special() to reserve and pin the entire region.
 137   // If there is a backing file directory for this space then whether
 138   // large pages are allocated is up to the filesystem of the backing file.
 139   // So we ignore the UseLargePages flag in this case.
 140   bool special = large && !os::can_commit_large_page_memory();
 141   if (special && _fd_for_heap != -1) {
 142     special = false;
 143     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 144       !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 145       log_debug(gc, heap)("Ignoring UseLargePages since large page support is up to the file system of the backing file for Java heap");
 146     }
 147   }
 148 
 149   char* base = NULL;
 150   char* nvdimm_base = NULL;
 151 
 152   if (special) {
 153 
 154     base = os::reserve_memory_special(size, alignment, requested_address, executable);
 155 
 156     if (base != NULL) {
 157       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
 158         // OS ignored requested address. Try different address.
 159         return;
 160       }
 161       // Check alignment constraints.
 162       assert((uintptr_t) base % alignment == 0,
 163              "Large pages returned a non-aligned address, base: "
 164              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 165              p2i(base), alignment);
 166       _special = true;
 167     } else {
 168       // failed; try to reserve regular memory below
 169       if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 170                             !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 171         log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 172       }
 173     }
 174   }
 175   int fd = -1;
 176   if (AllocateOldGenAt == NULL && _fd_for_heap != -1) {
 177     // AllocateHeapAt is in use.
 178     fd = _fd_for_heap;
 179   }
 180   if (base == NULL) {
 181     // Optimistically assume that the OS returns an aligned base pointer.
 182     // When reserving a large address range, most OSes seem to align to at
 183     // least 64K.
 184 
 185     // If the memory was requested at a particular address, use
 186     // os::attempt_reserve_memory_at() to avoid mapping over something
 187     // important.  If available space is not detected, return NULL.
 188 
 189     if (requested_address != 0) {
 190       base = os::attempt_reserve_memory_at(size, requested_address, fd);
 191       if (failed_to_reserve_as_requested(base, requested_address, size, false, fd != -1)) {
 192         // OS ignored requested address. Try different address.
 193         base = NULL;
 194       }
 195     } else {
 196       if (_nvdimm_base_nv != NULL && _fd_for_heap != -1) {
 197         base = os::reserve_memory(_dram_size, _nvdimm_base_nv, alignment, fd);
 198       } else {
 199         base = os::reserve_memory(size, NULL, alignment, fd);
 200       }
 201     }
 202 
 203     if (base == NULL) return;
 204 
 205     // Check alignment constraints
 206     if ((((size_t)base) & (alignment - 1)) != 0) {
 207       // Base not aligned, retry
 208       unmap_or_release_memory(base, size, fd != -1 /*is_file_mapped*/);
 209 
 210       // Make sure that size is aligned
 211       size = align_up(size, alignment);
 212       base = os::reserve_memory_aligned(size, alignment, fd);
 213 
 214       if (requested_address != 0 &&
 215           failed_to_reserve_as_requested(base, requested_address, size, false, fd != -1)) {
 216         // As a result of the alignment constraints, the allocated base differs
 217         // from the requested address. Return back to the caller who can
 218         // take remedial action (like try again without a requested address).
 219         assert(_base == NULL, "should be");
 220         return;
 221       }
 222     }
 223   }
 224   // Done
 225   _base = base;
 226   _nvdimm_base = _base - _nvdimm_size;
 227   _nvdimm_base_nv = NULL;
 228   _dram_size = (size_t)size;
 229   _size = size;
 230   _alignment = alignment;
 231   // If the heap is reserved with a backing file, the entire space has been committed, so set the _special flag to true.
 232   if (fd != -1) {
 233     _special = true;
 234   }
 235 }
 236 
 237 
 238 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
 239                              bool special, bool executable) {
 240   assert((size % os::vm_allocation_granularity()) == 0,
 241          "size not allocation aligned");
 242   _base = base;
 243   _size = size;
 244   _nvdimm_base = NULL; 
 245   _nvdimm_base_nv = NULL;
 246   _dram_size = (size_t)size;
 247   _alignment = alignment;
 248   _noaccess_prefix = 0;
 249   _special = special;
 250   _executable = executable;
 251 }
 252 
 253 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
 254                                         bool split, bool realloc) {
 255   assert(partition_size <= size(), "partition failed");
 256   if (split) {
 257     os::split_reserved_memory(base(), size(), partition_size, realloc);
 258   }
 259   ReservedSpace result(base(), partition_size, alignment, special(),
 260                        executable());
 261   return result;
 262 }
 263 
 264 
 265 ReservedSpace
 266 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
 267   assert(partition_size <= size(), "partition failed");
 268   ReservedSpace result(base() + partition_size, size() - partition_size,
 269                        alignment, special(), executable());
 270   return result;
 271 }
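     // Illustrative split (a sketch, assuming the usual alignment() accessor):
     //   ReservedSpace rs(2*M, M, /*large*/ false, /*executable*/ false);
     //   ReservedSpace lo_part = rs.first_part(M, rs.alignment(), /*split*/ false, /*realloc*/ true);
     //   ReservedSpace hi_part = rs.last_part(M, rs.alignment());
     // Neither call reserves new memory; both merely describe sub-ranges of rs.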
 272 
 273 
 274 size_t ReservedSpace::page_align_size_up(size_t size) {
 275   return align_up(size, os::vm_page_size());
 276 }
 277 
 278 
 279 size_t ReservedSpace::page_align_size_down(size_t size) {
 280   return align_down(size, os::vm_page_size());
 281 }
 282 
 283 
 284 size_t ReservedSpace::allocation_align_size_up(size_t size) {
 285   return align_up(size, os::vm_allocation_granularity());
 286 }
 287 
 288 
 289 size_t ReservedSpace::allocation_align_size_down(size_t size) {
 290   return align_down(size, os::vm_allocation_granularity());
 291 }
 292 
 293 
 294 void ReservedSpace::release() {
 295   if (is_reserved()) {
 296     char *real_base = _base - _noaccess_prefix;
 297     const size_t real_size = _size + _noaccess_prefix;
 298     // unmap nvdimm
 299     if (_nvdimm_base != NULL) {
 300       os::unmap_memory(_nvdimm_base, _nvdimm_size);
 301     }
 302     if (special()) {
 303       if (_fd_for_heap != -1) {
 304         os::unmap_memory(real_base, real_size);
 305       } else {
 306         os::release_memory_special(real_base, real_size);
 307       }
 308     } else {
 309       os::release_memory(real_base, real_size);
 310     }
 311     _base = NULL;
 312     _nvdimm_base = NULL;
 313     _nvdimm_base_nv = NULL;
 314     _dram_size = 0;
 315     _nvdimm_size = 0;
 316     _size = 0;
 317     _noaccess_prefix = 0;
 318     _alignment = 0;
 319     _special = false;
 320     _executable = false;
 321   }
 322 }
 323 
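     // The noaccess prefix must be a multiple of both the page size (so it can
     // be protected page-wise) and the heap alignment (so the shifted heap base
     // stays aligned), hence the least common multiple of the two.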
 324 static size_t noaccess_prefix_size(size_t alignment) {
 325   return lcm(os::vm_page_size(), alignment);
 326 }
 327 
 328 void ReservedHeapSpace::establish_noaccess_prefix() {
 329   assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
 330   _noaccess_prefix = noaccess_prefix_size(_alignment);
 331 
 332   if (base() && base() + _size > (char *)OopEncodingHeapMax) {
 333     if (true
 334         WIN64_ONLY(&& !UseLargePages)
 335         AIX_ONLY(&& os::vm_page_size() != 64*K)) {
 336       // Protect memory at the base of the allocated region.
 337       // If special, the page was committed (only matters on windows)
 338       if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
 339         fatal("cannot protect protection page");
 340       }
 341       log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
 342                                  PTR_FORMAT " / " INTX_FORMAT " bytes",
 343                                  p2i(_base),
 344                                  _noaccess_prefix);
 345       assert(Universe::narrow_oop_use_implicit_null_checks() == true, "not initialized?");
 346     } else {
 347       Universe::set_narrow_oop_use_implicit_null_checks(false);
 348     }
 349   }
 350 
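       // Shift the usable heap past the (possibly protected) prefix; the prefix
       // size is a multiple of _alignment, so _base stays aligned (asserted below).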
 351   _base += _noaccess_prefix;
 352   _size -= _noaccess_prefix;
 353   assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
 354 }
 355 
 356 // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
 357 // Does not check whether the reserved memory actually is at requested_address, as the memory returned
 358 // might still fulfill the wishes of the caller.
 359 // Ensures the memory is aligned to 'alignment'.
 360 // NOTE: If ReservedHeapSpace already points to some reserved memory, it is freed first.
 361 void ReservedHeapSpace::try_reserve_heap(size_t size,
 362                                          size_t alignment,
 363                                          bool large,
 364                                          char* requested_address) {
 365   if (_base != NULL) {
 366     // We tried before, but we didn't like the address delivered.
 367     release();
 368   }
 369 
 370   if (_fd_for_heap != -1 && UseG1GC && AllocateOldGenAt != NULL) {
 371     char* base_nv = os::reserve_memory(size, requested_address, alignment);
 372     initialize_g1gc_nvdimm_dram_sizes(size, alignment);
 373     _nvdimm_base_nv = base_nv + _nvdimm_size; // hint for the allocation address of the DRAM (compressed oops) heap
 374   }
 375 
 377   // If OS doesn't support demand paging for large page memory, we need
 378   // to use reserve_memory_special() to reserve and pin the entire region.
 379   // If there is a backing file directory for this space then whether
 380   // large pages are allocated is up to the filesystem of the backing file.
 381   // So we ignore the UseLargePages flag in this case.
 382   bool special = large && !os::can_commit_large_page_memory();
 383   if (special && _fd_for_heap != -1) {
 384     special = false;
 385     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 386                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 387       log_debug(gc, heap)("Cannot allocate large pages for Java Heap when AllocateHeapAt option is set.");
 388     }
 389   }
 390   char* base = NULL;
 391   char* nvdimm_base = NULL;
 392 
 393   log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
 394                              " heap of size " SIZE_FORMAT_HEX,
 395                              p2i(requested_address),
 396                              size);
 397 
 398   if (special) {
 399     base = os::reserve_memory_special(size, alignment, requested_address, false);
 400 
 401     if (base != NULL) {
 402       // Check alignment constraints.
 403       assert((uintptr_t) base % alignment == 0,
 404              "Large pages returned a non-aligned address, base: "
 405              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 406              p2i(base), alignment);
 407       _special = true;
 408     }
 409   }
 410 
 411   if (base == NULL) {
 412     // Failed; try to reserve regular memory below
 413     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 414                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 415       log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 416     }
 417 
 418     // Optimistically assume that the OS returns an aligned base pointer.
 419     // When reserving a large address range, most OSes seem to align to at
 420     // least 64K.
 421 
 422     // If the memory was requested at a particular address, use
 423     // os::attempt_reserve_memory_at() to avoid mapping over something
 424     // important.  If available space is not detected, return NULL.
 425 
 426     if (requested_address != 0) {
 427       if (_nvdimm_base_nv != NULL && _fd_for_heap != -1 && AllocateOldGenAt != NULL) {
 428         // First unmap so that the OS does not keep trying.
 429         os::unmap_memory(_nvdimm_base_nv, _dram_size);
 430         base = os::attempt_reserve_memory_at(_dram_size, _nvdimm_base_nv);
 431       } else {
 432         base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
 433       }
 434     } else {
 435       if (_nvdimm_base_nv != NULL && _fd_for_heap != -1 && AllocateOldGenAt != NULL) {
 436         // First unmap so that the OS does not keep trying.
 437         os::unmap_memory(_nvdimm_base_nv, _dram_size);
 438         base = os::reserve_memory(_dram_size, _nvdimm_base_nv, alignment);
 439       } else {
 440         base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
 441       }
 442     }
 443   }
 444   if (base == NULL) { return; }
 445 
 446   // Done
 447   _base = base;
 448   _nvdimm_base = _base - _nvdimm_size;
 449   if (_nvdimm_base_nv != NULL && _fd_for_heap != -1 && AllocateOldGenAt != NULL) {
 450     _size = _dram_size;
 451   } else {
 452     _size = size;
 453   }
 454   _alignment = alignment;
 455 
 456   // If the heap is reserved with a backing file, the entire space has been committed, so set the _special flag to true.
 457   if (_fd_for_heap != -1 && AllocateOldGenAt == NULL) {
 458     _special = true;
 459   }
 460 
 461   // Check alignment constraints
 462   if ((((size_t)base) & (alignment - 1)) != 0) {
 463     // Base not aligned, retry.
 464     release();
 465   }
 466 }
 467 
 468 void ReservedHeapSpace::try_reserve_range(char *highest_start,
 469                                           char *lowest_start,
 470                                           size_t attach_point_alignment,
 471                                           char *aligned_heap_base_min_address,
 472                                           char *upper_bound,
 473                                           size_t size,
 474                                           size_t alignment,
 475                                           bool large) {
 476   const size_t attach_range = highest_start - lowest_start;
 477   // Cap num_attempts at possible number.
 478   // At least one is possible even for 0 sized attach range.
 479   const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
 480   const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);
 481 
 482   const size_t stepsize = (attach_range == 0) ? // Only one try.
 483     (size_t) highest_start : align_up(attach_range / num_attempts_to_try, attach_point_alignment);
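       // Example (illustrative): with HeapSearchSteps == 3 and a 2G attach range
       // aligned to 1G, stepsize becomes 1G and at most three attach points are
       // tried: highest_start, highest_start - 1G and highest_start - 2G
       // (== lowest_start), stopping early once a satisfactory base is obtained.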
 484 
 485   // Try attach points from top to bottom.
 486   char* attach_point = highest_start;
 487   while (attach_point >= lowest_start  &&
 488          attach_point <= highest_start &&  // Avoid wrap around.
 489          ((_base == NULL) ||
 490           (_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
 491     try_reserve_heap(size, alignment, large, attach_point);
 492     attach_point -= stepsize;
 493   }
 494 }
 495 
 496 #define SIZE_64K  ((uint64_t) UCONST64(      0x10000))
 497 #define SIZE_256M ((uint64_t) UCONST64(   0x10000000))
 498 #define SIZE_32G  ((uint64_t) UCONST64(  0x800000000))
 499 
 500 // Helper for heap allocation. Returns an array with addresses
 501 // (OS-specific) which are suited for disjoint base mode. Array is
 502 // NULL terminated.
 503 static char** get_attach_addresses_for_disjoint_mode() {
 504   static uint64_t addresses[] = {
 505      2 * SIZE_32G,
 506      3 * SIZE_32G,
 507      4 * SIZE_32G,
 508      8 * SIZE_32G,
 509     10 * SIZE_32G,
 510      1 * SIZE_64K * SIZE_32G,
 511      2 * SIZE_64K * SIZE_32G,
 512      3 * SIZE_64K * SIZE_32G,
 513      4 * SIZE_64K * SIZE_32G,
 514     16 * SIZE_64K * SIZE_32G,
 515     32 * SIZE_64K * SIZE_32G,
 516     34 * SIZE_64K * SIZE_32G,
 517     0
 518   };
 519 
 520   // Sort out addresses smaller than HeapBaseMinAddress. This assumes
 521   // the array is sorted.
 522   uint i = 0;
 523   while (addresses[i] != 0 &&
 524          (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
 525     i++;
 526   }
 527   uint start = i;
 528 
 529   // Avoid more steps than requested.
 530   i = 0;
 531   while (addresses[start+i] != 0) {
 532     if (i == HeapSearchSteps) {
 533       addresses[start+i] = 0;
 534       break;
 535     }
 536     i++;
 537   }
 538 
 539   return (char**) &addresses[start];
 540 }
 541 
 542 void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, bool large) {
 543   guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
 544             "can not allocate compressed oop heap for this size");
 545   guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
 546   assert(HeapBaseMinAddress > 0, "sanity");
 547 
 548   const size_t granularity = os::vm_allocation_granularity();
 549   assert((size & (granularity - 1)) == 0,
 550          "size not aligned to os::vm_allocation_granularity()");
 551   assert((alignment & (granularity - 1)) == 0,
 552          "alignment not aligned to os::vm_allocation_granularity()");
 553   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 554          "not a power of 2");
 555 
 556   // The necessary attach point alignment for generated wish addresses.
 557   // This is needed to increase the chance of attaching for mmap and shmat.
 558   const size_t os_attach_point_alignment =
 559     AIX_ONLY(SIZE_256M)  // Known shm boundary alignment.
 560     NOT_AIX(os::vm_allocation_granularity());
 561   const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
 562 
 563   char *aligned_heap_base_min_address = (char *)align_up((void *)HeapBaseMinAddress, alignment);
 564   size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
 565     noaccess_prefix_size(alignment) : 0;
 566 
 567   // Attempt to alloc at user-given address.
 568   if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
 569     try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_heap_base_min_address);
 570     if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
 571       release();
 572     }
 573   }
 574 
 575   // Keep heap at HeapBaseMinAddress.
 576   if (_base == NULL) {
 577 
 578     // Try to allocate the heap at addresses that allow efficient oop compression.
 579     // Different schemes are tried, in order of decreasing optimization potential.
 580     //
 581     // For this, try_reserve_heap() is called with the desired heap base addresses.
 582     // A call into the os layer to allocate at a given address can return memory
 583     // at a different address than requested.  Still, this might be memory at a useful
 584     // address. try_reserve_heap() always returns this allocated memory, as only here
 585     // the criteria for a good heap are checked.
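         // The attempts below are therefore ordered by decreasing benefit:
         // unscaled (base == 0, shift == 0), then zerobased (base == 0 with a
         // shift), then the disjoint-base attach points, and finally any address.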
 586 
 587     // Attempt to allocate so that we can run without base and scale (32-Bit unscaled compressed oops).
 588     // Give it several tries from top of range to bottom.
 589     if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {
 590 
 591       // Calculate the address range within which we try to attach (range of possible start addresses).
 592       char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
 593       char* const lowest_start  = align_up(aligned_heap_base_min_address, attach_point_alignment);
 594       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 595                         aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
 596     }
 597 
 598     // zerobased: Attempt to allocate in the lower 32G.
 599     // But leave room for the compressed class pointers, which is allocated above
 600     // the heap.
 601     char *zerobased_max = (char *)OopEncodingHeapMax;
 602     const size_t class_space = align_up(CompressedClassSpaceSize, alignment);
 603     // For small heaps, save some space for compressed class pointer
 604     // space so it can be decoded with no base.
 605     if (UseCompressedClassPointers && !UseSharedSpaces &&
 606         OopEncodingHeapMax <= KlassEncodingMetaspaceMax &&
 607         (uint64_t)(aligned_heap_base_min_address + size + class_space) <= KlassEncodingMetaspaceMax) {
 608       zerobased_max = (char *)OopEncodingHeapMax - class_space;
 609     }
 610 
 611     // Give it several tries from top of range to bottom.
 612     if (aligned_heap_base_min_address + size <= zerobased_max &&    // Zerobased theoretical possible.
 613         ((_base == NULL) ||                        // No previous try succeeded.
 614          (_base + size > zerobased_max))) {        // Unscaled delivered an arbitrary address.
 615 
 616       // Calculate the address range within which we try to attach (range of possible start addresses).
 617       char *const highest_start = align_down(zerobased_max - size, attach_point_alignment);
 618       // Need to be careful about size being guaranteed to be less
 619       // than UnscaledOopHeapMax due to type constraints.
 620       char *lowest_start = aligned_heap_base_min_address;
 621       uint64_t unscaled_end = UnscaledOopHeapMax - size;
 622       if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
 623         lowest_start = MAX2(lowest_start, (char*)unscaled_end);
 624       }
 625       lowest_start = align_up(lowest_start, attach_point_alignment);
 626       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 627                         aligned_heap_base_min_address, zerobased_max, size, alignment, large);
 628     }
 629 
 630     // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
 631     // implement null checks.
 632     noaccess_prefix = noaccess_prefix_size(alignment);
 633 
 634     // Try to attach at addresses that are aligned to OopEncodingHeapMax (disjoint base mode).
 635     char** addresses = get_attach_addresses_for_disjoint_mode();
 636     int i = 0;
 637     while (addresses[i] &&                                 // End of array not yet reached.
 638            ((_base == NULL) ||                             // No previous try succeeded.
 639             (_base + size >  (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
 640              !Universe::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
 641       char* const attach_point = addresses[i];
 642       assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
 643       try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
 644       i++;
 645     }
 646 
 647     // Last, desperate try without any placement.
 648     if (_base == NULL) {
 649       log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix);
 650       initialize(size + noaccess_prefix, alignment, large, NULL, false);
 651     }
 652   }
 653 }
 654 
 655 void ReservedHeapSpace::initialize_g1gc_nvdimm_dram_sizes(size_t size, size_t alignment) {
 656   _dram_size = (size_t)((size * G1MaxNewSizePercent) / 100);
 657   size_t page_sz = os::vm_page_size() - 1;  // page mask used to round sizes up to a page boundary
 658   _dram_size = (_dram_size + page_sz) & (~page_sz);
 659   // align sizes.
 660   _dram_size = align_down(_dram_size, alignment);
 661   _nvdimm_size = size - _dram_size;
 662   _nvdimm_size = (_nvdimm_size + page_sz) & (~page_sz);
 663   _nvdimm_size = align_down(_nvdimm_size, alignment);
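       // Worked example (illustrative, assuming 4K pages, a 2M heap alignment and
       // the default G1MaxNewSizePercent of 60): for size = 10G this yields
       // _dram_size = 6G and _nvdimm_size = 4G after rounding and alignment.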
 664 }
 665 
 666 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* heap_allocation_directory) : ReservedSpace() {
 667 
 668   if (size == 0) {
 669     return;
 670   }
 671 
 672   // If AllocateOldGenAt is set, create the backing file for the old generation there.
 673   if (AllocateOldGenAt != NULL) {
 674     _fd_for_heap = os::create_file_for_heap(AllocateOldGenAt);
 675     if (_fd_for_heap == -1) {
 676       vm_exit_during_initialization(
 677         err_msg("Could not create file for Heap at location %s", AllocateOldGenAt));
 678     }
 679     if (UseParallelOldGC) {
 680       // For ParallelOldGC, adaptive sizing picks the _old_gen virtual space sizes as needed.
 681       // Allocate Xmx on NVDIMM, as adaptive sizing may put a lot of pressure on NVDIMM.
 682       os::allocate_file(_fd_for_heap, MaxHeapSize);
 683       os::set_nvdimm_fd(_fd_for_heap);
 684       os::set_nvdimm_present(true);
 685     }
 686   } else {
 687     _fd_for_heap = -1;
 688   }
 689 
 690   if (heap_allocation_directory != NULL) {
 691     _fd_for_heap = os::create_file_for_heap(heap_allocation_directory);
 692     if (_fd_for_heap == -1) {
 693       vm_exit_during_initialization(
 694         err_msg("Could not create file for Heap at location %s", heap_allocation_directory));
 695     }
 696   }
 697 
 698   // Heap size should be aligned to alignment, too.
 699   guarantee(is_aligned(size, alignment), "set by caller");
 700 
 701   char* base_nv = NULL;
 702   _nvdimm_base_nv = NULL;
 703   
 704   if (_fd_for_heap != -1 && UseG1GC && AllocateOldGenAt != NULL) {
 705     if (!UseCompressedOops) {
 706       // When compressed oops are used, the requested address is handled in try_reserve_heap() instead.
 707       initialize_g1gc_nvdimm_dram_sizes(size, alignment);
 708       base_nv = os::reserve_memory(size, NULL, alignment);
 709       _nvdimm_base_nv = base_nv + _nvdimm_size; // hint for the allocation address of the DRAM heap
 710     }
 711   }
 712 
 713   if (UseCompressedOops) {
 714     initialize_compressed_heap(size, alignment, large);
 715     if (_size > size) {
 716       // We allocated heap with noaccess prefix.
 717       // It can happen we get a zerobased/unscaled heap with noaccess prefix,
 718       // if we had to try at arbitrary address.
 719       establish_noaccess_prefix();
 720     }
 721   } else {
 722     if (_fd_for_heap != -1 && UseG1GC && AllocateOldGenAt != NULL) {
 723       initialize(_dram_size, alignment, large, NULL, false);
 724     } else {
 725       initialize(size, alignment, large, NULL, false);
 726     }
 727   }
 728 
 729   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
 730          "area must be distinguishable from marks for mark-sweep");
 731   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
 732          "area must be distinguishable from marks for mark-sweep");
 733 
 734   if (base() != NULL) {
 735     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
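         // In the NVDIMM configuration the file-backed old generation was mapped
         // directly below the DRAM part (_nvdimm_base == _base - _nvdimm_size),
         // so fold both back into one contiguous [_base, _base + _size) range.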
 736     if (_fd_for_heap != -1 && UseG1GC && AllocateOldGenAt != NULL) {
 737       os::set_nvdimm_present(true);
 738       os::set_dram_heapbase((address)_base);
 739       os::set_nvdimm_heapbase((address)_nvdimm_base);
 740       os::set_nvdimm_fd(_fd_for_heap);
 741       _size += _nvdimm_size;
 742       _base = _nvdimm_base;
 743       log_info(gc, heap)("Java DRAM Heap at [" PTR_FORMAT " - " PTR_FORMAT "] & NVDIMM Old Gen at [" PTR_FORMAT " - " PTR_FORMAT "], size " SIZE_FORMAT, p2i(_nvdimm_base + _nvdimm_size), p2i(_nvdimm_base + _nvdimm_size + _dram_size), p2i(_nvdimm_base), p2i(_nvdimm_base + _nvdimm_size), size);
 744     }
 745   }
 746 
 747   if (_fd_for_heap != -1 && AllocateOldGenAt == NULL) {
 748     os::close(_fd_for_heap);
 749   }
 750 }
 751 
 752 // Reserve space for code segment.  Same as Java heap only we mark this as
 753 // executable.
 754 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
 755                                      size_t rs_align,
 756                                      bool large) :
 757   ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
 758   MemTracker::record_virtual_memory_type((address)base(), mtCode);
 759 }
 760 
 761 // VirtualSpace
 762 
 763 VirtualSpace::VirtualSpace() {
 764   _low_boundary           = NULL;
 765   _high_boundary          = NULL;
 766   _low                    = NULL;
 767   _high                   = NULL;
 768   _lower_high             = NULL;
 769   _middle_high            = NULL;
 770   _upper_high             = NULL;
 771   _lower_high_boundary    = NULL;
 772   _middle_high_boundary   = NULL;
 773   _upper_high_boundary    = NULL;
 774   _lower_alignment        = 0;
 775   _middle_alignment       = 0;
 776   _upper_alignment        = 0;
 777   _special                = false;
 778   _executable             = false;
 779 }
 780 
 781 
 782 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
 783   const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
 784   return initialize_with_granularity(rs, committed_size, max_commit_granularity);
 785 }
 786 
 787 bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
 788   if (!rs.is_reserved()) return false;  // allocation failed.
 789   assert(_low_boundary == NULL, "VirtualSpace already initialized");
 790   assert(max_commit_granularity > 0, "Granularity must be non-zero.");
 791 
 792   _low_boundary  = rs.base();
 793   _high_boundary = low_boundary() + rs.size();
 794 
 795   _low = low_boundary();
 796   _high = low();
 797 
 798   _special = rs.special();
 799   _executable = rs.executable();
 800 
 801   // When a VirtualSpace begins life at a large size, make all future expansion
 802   // and shrinking occur aligned to a granularity of large pages.  This avoids
 803   // fragmentation of physical addresses that inhibits the use of large pages
 804   // by the OS virtual memory system.  Empirically, we see that with a 4MB
 805   // page size, the only spaces that get handled this way are codecache and
 806   // the heap itself, both of which provide a substantial performance
 807   // boost in many benchmarks when covered by large pages.
 808   //
 809   // No attempt is made to force large page alignment at the very top and
 810   // bottom of the space if they are not already so aligned.
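       // Resulting layout (illustrative):
       //   [low_boundary .. lower_high_boundary)         : lower region,  small-page granularity
       //   [lower_high_boundary .. middle_high_boundary) : middle region, large-page granularity
       //   [middle_high_boundary .. high_boundary)       : upper region,  small-page granularity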
 811   _lower_alignment  = os::vm_page_size();
 812   _middle_alignment = max_commit_granularity;
 813   _upper_alignment  = os::vm_page_size();
 814 
 815   // End of each region
 816   _lower_high_boundary = align_up(low_boundary(), middle_alignment());
 817   _middle_high_boundary = align_down(high_boundary(), middle_alignment());
 818   _upper_high_boundary = high_boundary();
 819 
 820   // High address of each region
 821   _lower_high = low_boundary();
 822   _middle_high = lower_high_boundary();
 823   _upper_high = middle_high_boundary();
 824 
 825   // commit to initial size
 826   if (committed_size > 0) {
 827     if (!expand_by(committed_size)) {
 828       return false;
 829     }
 830   }
 831   return true;
 832 }
 833 
 834 
 835 VirtualSpace::~VirtualSpace() {
 836   release();
 837 }
 838 
 839 
 840 void VirtualSpace::release() {
 841   // This does not release memory it reserved.
 842   // Caller must release via rs.release();
 843   _low_boundary           = NULL;
 844   _high_boundary          = NULL;
 845   _low                    = NULL;
 846   _high                   = NULL;
 847   _lower_high             = NULL;
 848   _middle_high            = NULL;
 849   _upper_high             = NULL;
 850   _lower_high_boundary    = NULL;
 851   _middle_high_boundary   = NULL;
 852   _upper_high_boundary    = NULL;
 853   _lower_alignment        = 0;
 854   _middle_alignment       = 0;
 855   _upper_alignment        = 0;
 856   _special                = false;
 857   _executable             = false;
 858 }
 859 
 860 
 861 size_t VirtualSpace::committed_size() const {
 862   return pointer_delta(high(), low(), sizeof(char));
 863 }
 864 
 865 
 866 size_t VirtualSpace::reserved_size() const {
 867   return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
 868 }
 869 
 870 
 871 size_t VirtualSpace::uncommitted_size()  const {
 872   return reserved_size() - committed_size();
 873 }
 874 
 875 size_t VirtualSpace::actual_committed_size() const {
 876   // Special VirtualSpaces commit all reserved space up front.
 877   if (special()) {
 878     return reserved_size();
 879   }
 880 
 881   size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
 882   size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
 883   size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));
 884 
 885 #ifdef ASSERT
 886   size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
 887   size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
 888   size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));
 889 
 890   if (committed_high > 0) {
 891     assert(committed_low == lower, "Must be");
 892     assert(committed_middle == middle, "Must be");
 893   }
 894 
 895   if (committed_middle > 0) {
 896     assert(committed_low == lower, "Must be");
 897   }
 898   if (committed_middle < middle) {
 899     assert(committed_high == 0, "Must be");
 900   }
 901 
 902   if (committed_low < lower) {
 903     assert(committed_high == 0, "Must be");
 904     assert(committed_middle == 0, "Must be");
 905   }
 906 #endif
 907 
 908   return committed_low + committed_middle + committed_high;
 909 }
 910 
 911 
 912 bool VirtualSpace::contains(const void* p) const {
 913   return low() <= (const char*) p && (const char*) p < high();
 914 }
 915 
 916 static void pretouch_expanded_memory(void* start, void* end) {
 917   assert(is_aligned(start, os::vm_page_size()), "Unexpected alignment");
 918   assert(is_aligned(end,   os::vm_page_size()), "Unexpected alignment");
 919 
 920   os::pretouch_memory(start, end);
 921 }
 922 
 923 static bool commit_expanded(char* start, size_t size, size_t alignment, bool pre_touch, bool executable) {
 924   if (os::commit_memory(start, size, alignment, executable)) {
 925     if (pre_touch || AlwaysPreTouch) {
 926       pretouch_expanded_memory(start, start + size);
 927     }
 928     return true;
 929   }
 930 
 931   debug_only(warning(
 932       "INFO: os::commit_memory(" PTR_FORMAT ", " PTR_FORMAT
 933       " size=" SIZE_FORMAT ", executable=%d) failed",
 934       p2i(start), p2i(start + size), size, executable);)
 935 
 936   return false;
 937 }
 938 
 939 /*
 940    First we need to determine if a particular virtual space is using large
 941    pages.  This is done at the initialize function and only virtual spaces
 942    that are larger than LargePageSizeInBytes use large pages.  Once we
 943    have determined this, all expand_by and shrink_by calls must grow and
 944    shrink by large page size chunks.  If a particular request
 945    is within the current large page, the call to commit and uncommit memory
 946    can be ignored.  In the case that the low and high boundaries of this
 947    space are not large page aligned, the pages leading to the first large
 948    page address and the pages after the last large page address must be
 949    allocated with default pages.
 950 */
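     // Illustrative case (a sketch, assuming 4K small pages and 2M large pages):
     // growth that stays within the lower or upper region is committed in whole
     // 4K pages, while growth into the middle region is committed up to the next
     // 2M-aligned boundary.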
 951 bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
 952   if (uncommitted_size() < bytes) {
 953     return false;
 954   }
 955 
 956   if (special()) {
 957     // don't commit memory if the entire space is pinned in memory
 958     _high += bytes;
 959     return true;
 960   }
 961 
 962   char* previous_high = high();
 963   char* unaligned_new_high = high() + bytes;
 964   assert(unaligned_new_high <= high_boundary(), "cannot expand by more than upper boundary");
 965 
 966   // Calculate where the new high for each of the regions should be.  If
 967   // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
 968   // then the unaligned lower and upper new highs would be the
 969   // lower_high() and upper_high() respectively.
 970   char* unaligned_lower_new_high =  MIN2(unaligned_new_high, lower_high_boundary());
 971   char* unaligned_middle_new_high = MIN2(unaligned_new_high, middle_high_boundary());
 972   char* unaligned_upper_new_high =  MIN2(unaligned_new_high, upper_high_boundary());
 973 
 974   // Align the new highs based on each region's alignment.  Lower and upper
 975   // alignment will always be the default page size.  Middle alignment will be
 976   // LargePageSizeInBytes if the actual size of the virtual space is in
 977   // fact larger than LargePageSizeInBytes.
 978   char* aligned_lower_new_high =  align_up(unaligned_lower_new_high, lower_alignment());
 979   char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
 980   char* aligned_upper_new_high =  align_up(unaligned_upper_new_high, upper_alignment());
 981 
 982   // Determine which regions need to grow in this expand_by call.
 983   // If you are growing in the lower region, high() must be in that
 984   // region so calculate the size based on high().  For the middle and
 985   // upper regions, determine the starting point of growth based on the
 986   // location of high().  By getting the MAX of the region's low address
 987   // (or the previous region's high address) and high(), we can tell if it
 988   // is an intra or inter region growth.
 989   size_t lower_needs = 0;
 990   if (aligned_lower_new_high > lower_high()) {
 991     lower_needs = pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
 992   }
 993   size_t middle_needs = 0;
 994   if (aligned_middle_new_high > middle_high()) {
 995     middle_needs = pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
 996   }
 997   size_t upper_needs = 0;
 998   if (aligned_upper_new_high > upper_high()) {
 999     upper_needs = pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
1000   }
1001 
1002   // Check contiguity.
1003   assert(low_boundary() <= lower_high() && lower_high() <= lower_high_boundary(),
1004          "high address must be contained within the region");
1005   assert(lower_high_boundary() <= middle_high() && middle_high() <= middle_high_boundary(),
1006          "high address must be contained within the region");
1007   assert(middle_high_boundary() <= upper_high() && upper_high() <= upper_high_boundary(),
1008          "high address must be contained within the region");
1009 
1010   // Commit regions
1011   if (lower_needs > 0) {
1012     assert(lower_high() + lower_needs <= lower_high_boundary(), "must not expand beyond region");
1013     if (!commit_expanded(lower_high(), lower_needs, _lower_alignment, pre_touch, _executable)) {
1014       return false;
1015     }
1016     _lower_high += lower_needs;
1017   }
1018 
1019   if (middle_needs > 0) {
1020     assert(middle_high() + middle_needs <= middle_high_boundary(), "must not expand beyond region");
1021     if (!commit_expanded(middle_high(), middle_needs, _middle_alignment, pre_touch, _executable)) {
1022       return false;
1023     }
1024     _middle_high += middle_needs;
1025   }
1026 
1027   if (upper_needs > 0) {
1028     assert(upper_high() + upper_needs <= upper_high_boundary(), "must not expand beyond region");
1029     if (!commit_expanded(upper_high(), upper_needs, _upper_alignment, pre_touch, _executable)) {
1030       return false;
1031     }
1032     _upper_high += upper_needs;
1033   }
1034 
1035   _high += bytes;
1036   return true;
1037 }
1038 
1039 // A page is uncommitted if the contents of the entire page are deemed unusable.
1040 // Continue to decrement the high() pointer until it reaches a page boundary
1041 // in which case that particular page can now be uncommitted.
1042 void VirtualSpace::shrink_by(size_t size) {
1043   if (committed_size() < size)
1044     fatal("Cannot shrink virtual space to negative size");
1045 
1046   if (special()) {
1047     // don't uncommit if the entire space is pinned in memory
1048     _high -= size;
1049     return;
1050   }
1051 
1052   char* unaligned_new_high = high() - size;
1053   assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");
1054 
1055   // Calculate new unaligned address
1056   char* unaligned_upper_new_high =
1057     MAX2(unaligned_new_high, middle_high_boundary());
1058   char* unaligned_middle_new_high =
1059     MAX2(unaligned_new_high, lower_high_boundary());
1060   char* unaligned_lower_new_high =
1061     MAX2(unaligned_new_high, low_boundary());
1062 
1063   // Align address to region's alignment
1064   char* aligned_upper_new_high =  align_up(unaligned_upper_new_high, upper_alignment());
1065   char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
1066   char* aligned_lower_new_high =  align_up(unaligned_lower_new_high, lower_alignment());
1067 
1068   // Determine which regions need to shrink
1069   size_t upper_needs = 0;
1070   if (aligned_upper_new_high < upper_high()) {
1071     upper_needs =
1072       pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
1073   }
1074   size_t middle_needs = 0;
1075   if (aligned_middle_new_high < middle_high()) {
1076     middle_needs =
1077       pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
1078   }
1079   size_t lower_needs = 0;
1080   if (aligned_lower_new_high < lower_high()) {
1081     lower_needs =
1082       pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
1083   }
1084 
1085   // Check contiguity.
1086   assert(middle_high_boundary() <= upper_high() &&
1087          upper_high() <= upper_high_boundary(),
1088          "high address must be contained within the region");
1089   assert(lower_high_boundary() <= middle_high() &&
1090          middle_high() <= middle_high_boundary(),
1091          "high address must be contained within the region");
1092   assert(low_boundary() <= lower_high() &&
1093          lower_high() <= lower_high_boundary(),
1094          "high address must be contained within the region");
1095 
1096   // Uncommit
1097   if (upper_needs > 0) {
1098     assert(middle_high_boundary() <= aligned_upper_new_high &&
1099            aligned_upper_new_high + upper_needs <= upper_high_boundary(),
1100            "must not shrink beyond region");
1101     if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
1102       debug_only(warning("os::uncommit_memory failed"));
1103       return;
1104     } else {
1105       _upper_high -= upper_needs;
1106     }
1107   }
1108   if (middle_needs > 0) {
1109     assert(lower_high_boundary() <= aligned_middle_new_high &&
1110            aligned_middle_new_high + middle_needs <= middle_high_boundary(),
1111            "must not shrink beyond region");
1112     if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
1113       debug_only(warning("os::uncommit_memory failed"));
1114       return;
1115     } else {
1116       _middle_high -= middle_needs;
1117     }
1118   }
1119   if (lower_needs > 0) {
1120     assert(low_boundary() <= aligned_lower_new_high &&
1121            aligned_lower_new_high + lower_needs <= lower_high_boundary(),
1122            "must not shrink beyond region");
1123     if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
1124       debug_only(warning("os::uncommit_memory failed"));
1125       return;
1126     } else {
1127       _lower_high -= lower_needs;
1128     }
1129   }
1130 
1131   _high -= size;
1132 }
1133 
1134 #ifndef PRODUCT
1135 void VirtualSpace::check_for_contiguity() {
1136   // Check contiguity.
1137   assert(low_boundary() <= lower_high() &&
1138          lower_high() <= lower_high_boundary(),
1139          "high address must be contained within the region");
1140   assert(lower_high_boundary() <= middle_high() &&
1141          middle_high() <= middle_high_boundary(),
1142          "high address must be contained within the region");
1143   assert(middle_high_boundary() <= upper_high() &&
1144          upper_high() <= upper_high_boundary(),
1145          "high address must be contained within the region");
1146   assert(low() >= low_boundary(), "low");
1147   assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
1148   assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
1149   assert(high() <= upper_high(), "upper high");
1150 }
1151 
1152 void VirtualSpace::print_on(outputStream* out) {
1153   out->print   ("Virtual space:");
1154   if (special()) out->print(" (pinned in memory)");
1155   out->cr();
1156   out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
1157   out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
1158   out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low()), p2i(high()));
1159   out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low_boundary()), p2i(high_boundary()));
1160 }
1161 
1162 void VirtualSpace::print() {
1163   print_on(tty);
1164 }
1165 
1166 /////////////// Unit tests ///////////////
1167 
1168 #ifndef PRODUCT
1169 
1170 #define test_log(...) \
1171   do {\
1172     if (VerboseInternalVMTests) { \
1173       tty->print_cr(__VA_ARGS__); \
1174       tty->flush(); \
1175     }\
1176   } while (false)
1177 
1178 class TestReservedSpace : AllStatic {
1179  public:
1180   static void small_page_write(void* addr, size_t size) {
1181     size_t page_size = os::vm_page_size();
1182 
1183     char* end = (char*)addr + size;
1184     for (char* p = (char*)addr; p < end; p += page_size) {
1185       *p = 1;
1186     }
1187   }
1188 
1189   static void release_memory_for_test(ReservedSpace rs) {
1190     if (rs.special()) {
1191       guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
1192     } else {
1193       guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
1194     }
1195   }
1196 
1197   static void test_reserved_space1(size_t size, size_t alignment) {
1198     test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
1199 
1200     assert(is_aligned(size, alignment), "Incorrect input parameters");
1201 
1202     ReservedSpace rs(size,          // size
1203                      alignment,     // alignment
1204                      UseLargePages, // large
1205                      (char *)NULL); // requested_address
1206 
1207     test_log(" rs.special() == %d", rs.special());
1208 
1209     assert(rs.base() != NULL, "Must be");
1210     assert(rs.size() == size, "Must be");
1211 
1212     assert(is_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
1213     assert(is_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
1214 
1215     if (rs.special()) {
1216       small_page_write(rs.base(), size);
1217     }
1218 
1219     release_memory_for_test(rs);
1220   }
1221 
1222   static void test_reserved_space2(size_t size) {
1223     test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
1224 
1225     assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1226 
1227     ReservedSpace rs(size);
1228 
1229     test_log(" rs.special() == %d", rs.special());
1230 
1231     assert(rs.base() != NULL, "Must be");
1232     assert(rs.size() == size, "Must be");
1233 
1234     if (rs.special()) {
1235       small_page_write(rs.base(), size);
1236     }
1237 
1238     release_memory_for_test(rs);
1239   }
1240 
1241   static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
1242     test_log("test_reserved_space3(%p, %p, %d)",
1243         (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);
1244 
1245     if (size < alignment) {
1246       // Tests might set -XX:LargePageSizeInBytes=<small pages> and cause unexpected input arguments for this test.
1247       assert((size_t)os::vm_page_size() == os::large_page_size(), "Test needs further refinement");
1248       return;
1249     }
1250 
1251     assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1252     assert(is_aligned(size, alignment), "Must be at least aligned against alignment");
1253 
1254     bool large = maybe_large && UseLargePages && size >= os::large_page_size();
1255 
1256     ReservedSpace rs(size, alignment, large, false);
1257 
1258     test_log(" rs.special() == %d", rs.special());
1259 
1260     assert(rs.base() != NULL, "Must be");
1261     assert(rs.size() == size, "Must be");
1262 
1263     if (rs.special()) {
1264       small_page_write(rs.base(), size);
1265     }
1266 
1267     release_memory_for_test(rs);
1268   }
1269 
1270 
1271   static void test_reserved_space1() {
1272     size_t size = 2 * 1024 * 1024;
1273     size_t ag   = os::vm_allocation_granularity();
1274 
1275     test_reserved_space1(size,      ag);
1276     test_reserved_space1(size * 2,  ag);
1277     test_reserved_space1(size * 10, ag);
1278   }
1279 
1280   static void test_reserved_space2() {
1281     size_t size = 2 * 1024 * 1024;
1282     size_t ag = os::vm_allocation_granularity();
1283 
1284     test_reserved_space2(size * 1);
1285     test_reserved_space2(size * 2);
1286     test_reserved_space2(size * 10);
1287     test_reserved_space2(ag);
1288     test_reserved_space2(size - ag);
1289     test_reserved_space2(size);
1290     test_reserved_space2(size + ag);
1291     test_reserved_space2(size * 2);
1292     test_reserved_space2(size * 2 - ag);
1293     test_reserved_space2(size * 2 + ag);
1294     test_reserved_space2(size * 3);
1295     test_reserved_space2(size * 3 - ag);
1296     test_reserved_space2(size * 3 + ag);
1297     test_reserved_space2(size * 10);
1298     test_reserved_space2(size * 10 + size / 2);
1299   }
1300 
  static void test_reserved_space3() {
    size_t ag = os::vm_allocation_granularity();

    test_reserved_space3(ag,      ag    , false);
    test_reserved_space3(ag * 2,  ag    , false);
    test_reserved_space3(ag * 3,  ag    , false);
    test_reserved_space3(ag * 2,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 2, false);
    test_reserved_space3(ag * 8,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 4, false);
    test_reserved_space3(ag * 8,  ag * 4, false);
    test_reserved_space3(ag * 16, ag * 4, false);

    if (UseLargePages) {
      size_t lp = os::large_page_size();

      // Without large pages
      test_reserved_space3(lp,     ag * 4, false);
      test_reserved_space3(lp * 2, ag * 4, false);
      test_reserved_space3(lp * 4, ag * 4, false);
      test_reserved_space3(lp,     lp    , false);
      test_reserved_space3(lp * 2, lp    , false);
      test_reserved_space3(lp * 3, lp    , false);
      test_reserved_space3(lp * 2, lp * 2, false);
      test_reserved_space3(lp * 4, lp * 2, false);
      test_reserved_space3(lp * 8, lp * 2, false);

      // With large pages
      test_reserved_space3(lp, ag * 4    , true);
      test_reserved_space3(lp * 2, ag * 4, true);
      test_reserved_space3(lp * 4, ag * 4, true);
      test_reserved_space3(lp, lp        , true);
      test_reserved_space3(lp * 2, lp    , true);
      test_reserved_space3(lp * 3, lp    , true);
      test_reserved_space3(lp * 2, lp * 2, true);
      test_reserved_space3(lp * 4, lp * 2, true);
      test_reserved_space3(lp * 8, lp * 2, true);
    }
  }

  static void test_reserved_space() {
    test_reserved_space1();
    test_reserved_space2();
    test_reserved_space3();
  }
};

void TestReservedSpace_test() {
  TestReservedSpace::test_reserved_space();
}

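// Assertion helpers for the VirtualSpace tests below; on failure they report
// both operands using SIZE_FORMAT.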
#define assert_equals(actual, expected)  \
  assert(actual == expected,             \
         "Got " SIZE_FORMAT " expected " \
         SIZE_FORMAT, actual, expected);

#define assert_ge(value1, value2)                  \
  assert(value1 >= value2,                         \
         "'" #value1 "': " SIZE_FORMAT " '"        \
         #value2 "': " SIZE_FORMAT, value1, value2);

#define assert_lt(value1, value2)                  \
  assert(value1 < value2,                          \
         "'" #value1 "': " SIZE_FORMAT " '"        \
         #value2 "': " SIZE_FORMAT, value1, value2);


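// Exercises VirtualSpace commit behavior. TestLargePages selects how the
// backing ReservedSpace is created and which page size is used when
// committing (see reserve_memory() and initialize_virtual_space() below).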
class TestVirtualSpace : AllStatic {
  enum TestLargePages {
    Default,
    Disable,
    Reserve,
    Commit
  };

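  // Default and Reserve use the plain ReservedSpace constructor, which may
  // back the reservation with large pages; Disable and Commit reserve
  // explicitly without large pages.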
  static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
    switch(mode) {
    default:
    case Default:
    case Reserve:
      return ReservedSpace(reserve_size_aligned);
    case Disable:
    case Commit:
      return ReservedSpace(reserve_size_aligned,
                           os::vm_allocation_granularity(),
                           /* large */ false, /* exec */ false);
    }
  }

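  // Default and Reserve initialize with the default page size selection;
  // Disable forces small-page commit granularity; Commit uses the largest
  // page size that fits the reserved region.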
  static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
    switch(mode) {
    default:
    case Default:
    case Reserve:
      return vs.initialize(rs, 0);
    case Disable:
      return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
    case Commit:
      return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
    }
  }

 public:
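  // Reserve 'reserve_size' (aligned up to the allocation granularity), commit
  // 'commit_size', and check that actual_committed_size() stays within one
  // commit granule of the request, or equals the whole reservation when the
  // space is special.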
  static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
                                                        TestLargePages mode = Default) {
    size_t granularity = os::vm_allocation_granularity();
    size_t reserve_size_aligned = align_up(reserve_size, granularity);

    ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);

    assert(reserved.is_reserved(), "Must be");

    VirtualSpace vs;
    bool initialized = initialize_virtual_space(vs, reserved, mode);
    assert(initialized, "Failed to initialize VirtualSpace");

    vs.expand_by(commit_size, false);

    if (vs.special()) {
      assert_equals(vs.actual_committed_size(), reserve_size_aligned);
    } else {
      assert_ge(vs.actual_committed_size(), commit_size);
      // Approximate the commit granularity.
      // Make sure that we don't commit using large pages
      // if large pages have been disabled for this VirtualSpace.
      size_t commit_granularity = (mode == Disable || !UseLargePages) ?
                                   os::vm_page_size() : os::large_page_size();
      assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
    }

    reserved.release();
  }

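  // Reserve a single large page with large pages requested and verify that
  // committing it commits exactly os::large_page_size() bytes.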
  static void test_virtual_space_actual_committed_space_one_large_page() {
    if (!UseLargePages) {
      return;
    }

    size_t large_page_size = os::large_page_size();

    ReservedSpace reserved(large_page_size, large_page_size, true, false);

    assert(reserved.is_reserved(), "Must be");

    VirtualSpace vs;
    bool initialized = vs.initialize(reserved, 0);
    assert(initialized, "Failed to initialize VirtualSpace");

    vs.expand_by(large_page_size, false);

    assert_equals(vs.actual_committed_size(), large_page_size);

    reserved.release();
  }

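  // Run the committed-space test over reserve/commit combinations from 4K up
  // to 10M, including empty, partial, and full commits.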
  static void test_virtual_space_actual_committed_space() {
    test_virtual_space_actual_committed_space(4 * K, 0);
    test_virtual_space_actual_committed_space(4 * K, 4 * K);
    test_virtual_space_actual_committed_space(8 * K, 0);
    test_virtual_space_actual_committed_space(8 * K, 4 * K);
    test_virtual_space_actual_committed_space(8 * K, 8 * K);
    test_virtual_space_actual_committed_space(12 * K, 0);
    test_virtual_space_actual_committed_space(12 * K, 4 * K);
    test_virtual_space_actual_committed_space(12 * K, 8 * K);
    test_virtual_space_actual_committed_space(12 * K, 12 * K);
    test_virtual_space_actual_committed_space(64 * K, 0);
    test_virtual_space_actual_committed_space(64 * K, 32 * K);
    test_virtual_space_actual_committed_space(64 * K, 64 * K);
    test_virtual_space_actual_committed_space(2 * M, 0);
    test_virtual_space_actual_committed_space(2 * M, 4 * K);
    test_virtual_space_actual_committed_space(2 * M, 64 * K);
    test_virtual_space_actual_committed_space(2 * M, 1 * M);
    test_virtual_space_actual_committed_space(2 * M, 2 * M);
    test_virtual_space_actual_committed_space(10 * M, 0);
    test_virtual_space_actual_committed_space(10 * M, 4 * K);
    test_virtual_space_actual_committed_space(10 * M, 8 * K);
    test_virtual_space_actual_committed_space(10 * M, 1 * M);
    test_virtual_space_actual_committed_space(10 * M, 2 * M);
    test_virtual_space_actual_committed_space(10 * M, 5 * M);
    test_virtual_space_actual_committed_space(10 * M, 10 * M);
  }

  static void test_virtual_space_disable_large_pages() {
    if (!UseLargePages) {
      return;
    }
    // These test cases verify that if we force VirtualSpace to disable large pages,
    // committing falls back to small pages; the Reserve and Commit variants run the
    // same sizes through the other TestLargePages modes for comparison.
    test_virtual_space_actual_committed_space(10 * M, 0, Disable);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);

    test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);

    test_virtual_space_actual_committed_space(10 * M, 0, Commit);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
  }

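  // Entry point invoked by TestVirtualSpace_test() below.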
  static void test_virtual_space() {
    test_virtual_space_actual_committed_space();
    test_virtual_space_actual_committed_space_one_large_page();
    test_virtual_space_disable_large_pages();
  }
};

void TestVirtualSpace_test() {
  TestVirtualSpace::test_virtual_space();
}

#endif // PRODUCT

#endif