1 /*
   2  * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "logging/log.hpp"
  27 #include "memory/resourceArea.hpp"
  28 #include "memory/virtualspace.hpp"
  29 #include "oops/markOop.hpp"
  30 #include "oops/oop.inline.hpp"
  31 #include "services/memTracker.hpp"
  32 #include "utilities/align.hpp"
  33 
  34 // ReservedSpace
  35 
  36 // Dummy constructor
  37 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
  38     _alignment(0), _special(false), _executable(false) {
  39 }
  40 
  41 ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) {
  42   bool has_preferred_page_size = preferred_page_size != 0;
  43   // Want to use large pages where possible and pad with small pages.
  44   size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
  45   bool large_pages = page_size != (size_t)os::vm_page_size();
  46   size_t alignment;
  47   if (large_pages && has_preferred_page_size) {
  48     alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
  49     // ReservedSpace initialization requires size to be aligned to the given
  50     // alignment. Align the size up.
  51     size = align_up(size, alignment);
  52   } else {
  53     // Don't force the alignment to be large page aligned,
  54     // since that will waste memory.
  55     alignment = os::vm_allocation_granularity();
  56   }
  57   initialize(size, alignment, large_pages, NULL, false);
  58 }
  59 
  60 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  61                              bool large,
  62                              char* requested_address) {
  63   initialize(size, alignment, large, requested_address, false);
  64 }
  65 
  66 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  67                              bool large,
  68                              bool executable) {
  69   initialize(size, alignment, large, NULL, executable);
  70 }
  71 
  72 // Helper method. Returns true if an address was requested but the reservation did not end up there; any memory that was reserved is then released.
  73 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
  74                                            const size_t size, bool special)
  75 {
  76   if (base == requested_address || requested_address == NULL)
  77     return false; // did not fail
  78 
  79   if (base != NULL) {
  80     // A different reserve address may be acceptable in other cases,
  81     // but for compressed oops the heap should be at the requested address.
  82     assert(UseCompressedOops, "currently requested address used only for compressed oops");
  83     log_debug(gc, heap, coops)("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address));
  84     // OS ignored requested address. Try different address.
  85     if (special) {
  86       if (!os::release_memory_special(base, size)) {
  87         fatal("os::release_memory_special failed");
  88       }
  89     } else {
  90       if (!os::release_memory(base, size)) {
  91         fatal("os::release_memory failed");
  92       }
  93     }
  94   }
  95   return true;
  96 }
  97 
  98 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
  99                                char* requested_address,
 100                                bool executable) {
 101   const size_t granularity = os::vm_allocation_granularity();
 102   assert((size & (granularity - 1)) == 0,
 103          "size not aligned to os::vm_allocation_granularity()");
 104   assert((alignment & (granularity - 1)) == 0,
 105          "alignment not aligned to os::vm_allocation_granularity()");
 106   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 107          "not a power of 2");
 108 
 109   alignment = MAX2(alignment, (size_t)os::vm_page_size());
 110 
 111   _base = NULL;
 112   _size = 0;
 113   _special = false;
 114   _executable = executable;
 115   _alignment = 0;
 116   _noaccess_prefix = 0;
 117   if (size == 0) {
 118     return;
 119   }
 120 
 121   // If the OS doesn't support demand paging for large page memory, we need
 122   // to use reserve_memory_special() to reserve and pin the entire region.
 123   bool special = large && !os::can_commit_large_page_memory();
 124   char* base = NULL;
 125 
 126   if (special) {
 127 
 128     base = os::reserve_memory_special(size, alignment, requested_address, executable);
 129 
 130     if (base != NULL) {
 131       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
 132         // OS ignored requested address. Try different address.
 133         return;
 134       }
 135       // Check alignment constraints.
 136       assert((uintptr_t) base % alignment == 0,
 137              "Large pages returned a non-aligned address, base: "
 138              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 139              p2i(base), alignment);
 140       _special = true;
 141     } else {
 142       // Failed; try to reserve regular memory below
 143       if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 144                             !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 145         log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 146       }
 147     }
 148   }
 149 
 150   if (base == NULL) {
 151     // Optimistically assume that the OS returns an aligned base pointer.
 152     // When reserving a large address range, most OSes seem to align to at
 153     // least 64K.
 154 
 155     // If the memory was requested at a particular address, use
 156     // os::attempt_reserve_memory_at() to avoid mapping over something
 157     // important.  If available space is not detected, return NULL.
 158 
 159     if (requested_address != 0) {
 160       base = os::attempt_reserve_memory_at(size, requested_address);
 161       if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
 162         // OS ignored requested address. Try different address.
 163         base = NULL;
 164       }
 165     } else {
 166       base = os::reserve_memory(size, NULL, alignment);
 167     }
 168 
 169     if (base == NULL) return;
 170 
 171     // Check alignment constraints
 172     if ((((size_t)base) & (alignment - 1)) != 0) {
 173       // Base not aligned, retry
 174       if (!os::release_memory(base, size)) fatal("os::release_memory failed");
 175       // Make sure that size is aligned
 176       size = align_up(size, alignment);
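           // reserve_memory_aligned() guarantees an aligned result, typically by
           // over-reserving and releasing the unaligned excess.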
 177       base = os::reserve_memory_aligned(size, alignment);
 178 
 179       if (requested_address != 0 &&
 180           failed_to_reserve_as_requested(base, requested_address, size, false)) {
 181         // As a result of the alignment constraints, the allocated base differs
 182         // from the requested address. Return to the caller, who can
 183         // take remedial action (like try again without a requested address).
 184         assert(_base == NULL, "should be");
 185         return;
 186       }
 187     }
 188   }
 189   // Done
 190   _base = base;
 191   _size = size;
 192   _alignment = alignment;
 193 }
 194 
 195 
 196 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
 197                              bool special, bool executable) {
 198   assert((size % os::vm_allocation_granularity()) == 0,
 199          "size not allocation aligned");
 200   _base = base;
 201   _size = size;
 202   _alignment = alignment;
 203   _noaccess_prefix = 0;
 204   _special = special;
 205   _executable = executable;
 206 }
 207 
 208 
 209 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
 210                                         bool split, bool realloc) {
 211   assert(partition_size <= size(), "partition failed");
 212   if (split) {
 213     os::split_reserved_memory(base(), size(), partition_size, realloc);
 214   }
 215   ReservedSpace result(base(), partition_size, alignment, special(),
 216                        executable());
 217   return result;
 218 }
 219 
 220 
 221 ReservedSpace
 222 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
 223   assert(partition_size <= size(), "partition failed");
 224   ReservedSpace result(base() + partition_size, size() - partition_size,
 225                        alignment, special(), executable());
 226   return result;
 227 }
 228 
 229 
 230 size_t ReservedSpace::page_align_size_up(size_t size) {
 231   return align_up(size, os::vm_page_size());
 232 }
 233 
 234 
 235 size_t ReservedSpace::page_align_size_down(size_t size) {
 236   return align_down(size, os::vm_page_size());
 237 }
 238 
 239 
 240 size_t ReservedSpace::allocation_align_size_up(size_t size) {
 241   return align_up(size, os::vm_allocation_granularity());
 242 }
 243 
 244 
 245 size_t ReservedSpace::allocation_align_size_down(size_t size) {
 246   return align_down(size, os::vm_allocation_granularity());
 247 }
 248 
 249 
 250 void ReservedSpace::release() {
 251   if (is_reserved()) {
 252     char *real_base = _base - _noaccess_prefix;
 253     const size_t real_size = _size + _noaccess_prefix;
 254     if (special()) {
 255       os::release_memory_special(real_base, real_size);
 256     } else {
 257       os::release_memory(real_base, real_size);
 258     }
 259     _base = NULL;
 260     _size = 0;
 261     _noaccess_prefix = 0;
 262     _alignment = 0;
 263     _special = false;
 264     _executable = false;
 265   }
 266 }
 267 
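     // The noaccess prefix must cover whole pages (so it can be protected) and whole
     // alignment units (so the heap base stays aligned after skipping the prefix);
     // the least common multiple satisfies both.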
 268 static size_t noaccess_prefix_size(size_t alignment) {
 269   return lcm(os::vm_page_size(), alignment);
 270 }
 271 
 272 void ReservedHeapSpace::establish_noaccess_prefix() {
 273   assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
 274   _noaccess_prefix = noaccess_prefix_size(_alignment);
 275 
 276   if (base() && base() + _size > (char *)OopEncodingHeapMax) {
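         // On the configurations excluded below, protecting just the first page is not
         // reliable, so we fall back to explicit null checks instead.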
 277     if (true
 278         WIN64_ONLY(&& !UseLargePages)
 279         AIX_ONLY(&& os::vm_page_size() != 64*K)) {
 280       // Protect memory at the base of the allocated region.
 281       // If special, the page was committed (only matters on windows)
 282       if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
 283         fatal("cannot protect protection page");
 284       }
 285       log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
 286                                  PTR_FORMAT " / " INTX_FORMAT " bytes",
 287                                  p2i(_base),
 288                                  _noaccess_prefix);
 289       assert(Universe::narrow_oop_use_implicit_null_checks() == true, "not initialized?");
 290     } else {
 291       Universe::set_narrow_oop_use_implicit_null_checks(false);
 292     }
 293   }
 294 
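       // Hide the prefix from users of this space: the usable heap starts above it.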
 295   _base += _noaccess_prefix;
 296   _size -= _noaccess_prefix;
 297   assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
 298 }
 299 
 300 // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
 301 // Does not check whether the reserved memory is actually at requested_address, as the memory returned
 302 // might still fulfill the wishes of the caller.
 303 // Ensures the memory is aligned to 'alignment'.
 304 // NOTE: If this ReservedHeapSpace already points to some reserved memory, that memory is freed first.
 305 void ReservedHeapSpace::try_reserve_heap(size_t size,
 306                                          size_t alignment,
 307                                          bool large,
 308                                          char* requested_address) {
 309   if (_base != NULL) {
 310     // We tried before, but we didn't like the address delivered.
 311     release();
 312   }
 313 
 314   // If the OS doesn't support demand paging for large page memory, we need
 315   // to use reserve_memory_special() to reserve and pin the entire region.
 316   bool special = large && !os::can_commit_large_page_memory();
 317   char* base = NULL;
 318 
 319   log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
 320                              " heap of size " SIZE_FORMAT_HEX,
 321                              p2i(requested_address),
 322                              size);
 323 
 324   if (special) {
 325     base = os::reserve_memory_special(size, alignment, requested_address, false);
 326 
 327     if (base != NULL) {
 328       // Check alignment constraints.
 329       assert((uintptr_t) base % alignment == 0,
 330              "Large pages returned a non-aligned address, base: "
 331              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 332              p2i(base), alignment);
 333       _special = true;
 334     }
 335   }
 336 
 337   if (base == NULL) {
 338     // Failed; try to reserve regular memory below
 339     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 340                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 341       log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 342     }
 343 
 344     // Optimistically assume that the OS returns an aligned base pointer.
 345     // When reserving a large address range, most OSes seem to align to at
 346     // least 64K.
 347 
 348     // If the memory was requested at a particular address, use
 349     // os::attempt_reserve_memory_at() to avoid mapping over something
 350     // important.  If available space is not detected, return NULL.
 351 
 352     if (requested_address != 0) {
 353       base = os::attempt_reserve_memory_at(size, requested_address);
 354     } else {
 355       base = os::reserve_memory(size, NULL, alignment);
 356     }
 357   }
 358   if (base == NULL) { return; }
 359 
 360   // Done
 361   _base = base;
 362   _size = size;
 363   _alignment = alignment;
 364 
 365   // Check alignment constraints
 366   if ((((size_t)base) & (alignment - 1)) != 0) {
 367     // Base not aligned, retry.
 368     release();
 369   }
 370 }
 371 
 372 void ReservedHeapSpace::try_reserve_range(char *highest_start,
 373                                           char *lowest_start,
 374                                           size_t attach_point_alignment,
 375                                           char *aligned_heap_base_min_address,
 376                                           char *upper_bound,
 377                                           size_t size,
 378                                           size_t alignment,
 379                                           bool large) {
 380   const size_t attach_range = highest_start - lowest_start;
 381   // Cap num_attempts at the number of possible attach points.
 382   // At least one attempt is possible even for a zero-sized attach range.
 383   const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
 384   const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);
 385 
 386   const size_t stepsize = (attach_range == 0) ? // Only one try.
 387     (size_t) highest_start : align_up(attach_range / num_attempts_to_try, attach_point_alignment);
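       // With a zero-sized attach range the step equals highest_start, so after the
       // single attempt below attach_point drops below lowest_start and the loop ends.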
 388 
 389   // Try attach points from top to bottom.
 390   char* attach_point = highest_start;
 391   while (attach_point >= lowest_start  &&
 392          attach_point <= highest_start &&  // Avoid wrap around.
 393          ((_base == NULL) ||
 394           (_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
 395     try_reserve_heap(size, alignment, large, attach_point);
 396     attach_point -= stepsize;
 397   }
 398 }
 399 
 400 #define SIZE_64K  ((uint64_t) UCONST64(      0x10000))
 401 #define SIZE_256M ((uint64_t) UCONST64(   0x10000000))
 402 #define SIZE_32G  ((uint64_t) UCONST64(  0x800000000))
 403 
 404 // Helper for heap allocation. Returns an array of addresses
 405 // (OS-specific) that are suited for disjoint base mode. The array is
 406 // NULL-terminated.
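     // The addresses are multiples of 32G, i.e. of OopEncodingHeapMax with the default
     // 8-byte object alignment, so the heap base bits and the shifted-oop bits do not
     // overlap ("disjoint"), which allows cheaper oop decoding on some platforms.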
 407 static char** get_attach_addresses_for_disjoint_mode() {
 408   static uint64_t addresses[] = {
 409      2 * SIZE_32G,
 410      3 * SIZE_32G,
 411      4 * SIZE_32G,
 412      8 * SIZE_32G,
 413     10 * SIZE_32G,
 414      1 * SIZE_64K * SIZE_32G,
 415      2 * SIZE_64K * SIZE_32G,
 416      3 * SIZE_64K * SIZE_32G,
 417      4 * SIZE_64K * SIZE_32G,
 418     16 * SIZE_64K * SIZE_32G,
 419     32 * SIZE_64K * SIZE_32G,
 420     34 * SIZE_64K * SIZE_32G,
 421     0
 422   };
 423 
 424   // Skip addresses smaller than OopEncodingHeapMax or HeapBaseMinAddress.
 425   // This assumes the array is sorted in ascending order.
 426   uint i = 0;
 427   while (addresses[i] != 0 &&
 428          (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
 429     i++;
 430   }
 431   uint start = i;
 432 
 433   // Avoid more steps than requested.
 434   i = 0;
 435   while (addresses[start+i] != 0) {
 436     if (i == HeapSearchSteps) {
 437       addresses[start+i] = 0;
 438       break;
 439     }
 440     i++;
 441   }
 442 
 443   return (char**) &addresses[start];
 444 }
 445 
 446 void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, bool large) {
 447   guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
 448             "cannot allocate compressed oop heap for this size");
 449   guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
 450   assert(HeapBaseMinAddress > 0, "sanity");
 451 
 452   const size_t granularity = os::vm_allocation_granularity();
 453   assert((size & (granularity - 1)) == 0,
 454          "size not aligned to os::vm_allocation_granularity()");
 455   assert((alignment & (granularity - 1)) == 0,
 456          "alignment not aligned to os::vm_allocation_granularity()");
 457   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 458          "not a power of 2");
 459 
 460   // The necessary attach point alignment for generated wish addresses.
 461   // This is needed to increase the chance of attaching for mmap and shmat.
 462   const size_t os_attach_point_alignment =
 463     AIX_ONLY(SIZE_256M)  // Known shm boundary alignment.
 464     NOT_AIX(os::vm_allocation_granularity());
 465   const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
 466 
 467   char *aligned_heap_base_min_address = (char *)align_up((void *)HeapBaseMinAddress, alignment);
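       // A noaccess prefix is only needed if the heap may extend beyond OopEncodingHeapMax,
       // i.e. if the heap base must be non-zero and implicit null checks need a protected page.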
 468   size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
 469     noaccess_prefix_size(alignment) : 0;
 470 
 471   // Attempt to allocate at the user-given address.
 472   if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
 473     try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_heap_base_min_address);
 474     if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
 475       release();
 476     }
 477   }
 478 
 479   // Keep heap at HeapBaseMinAddress.
 480   if (_base == NULL) {
 481 
 482     // Try to allocate the heap at addresses that allow efficient oop compression.
 483     // Different schemes are tried, in order of decreasing optimization potential.
 484     //
 485     // For this, try_reserve_heap() is called with the desired heap base addresses.
 486     // A call into the os layer to allocate at a given address can return memory
 487     // at a different address than requested.  Still, this might be memory at a useful
 488     // address. try_reserve_heap() always returns this allocated memory, since the
 489     // criteria for a good heap are only checked here.
 490 
 491     // Attempt to allocate so that we can run without base and scale (32-Bit unscaled compressed oops).
 492     // Give it several tries from top of range to bottom.
 493     if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {
 494 
 495       // Calculate the address range within which we try to attach (range of possible start addresses).
 496       char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
 497       char* const lowest_start  = align_up(aligned_heap_base_min_address, attach_point_alignment);
 498       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 499                         aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
 500     }
 501 
 502     // zerobased: Attempt to allocate in the lower 32G.
 503     // But leave room for the compressed class space, which is allocated above
 504     // the heap.
 505     char *zerobased_max = (char *)OopEncodingHeapMax;
 506     const size_t class_space = align_up(CompressedClassSpaceSize, alignment);
 507     // For small heaps, save some space for compressed class pointer
 508     // space so it can be decoded with no base.
 509     if (UseCompressedClassPointers && !UseSharedSpaces &&
 510         OopEncodingHeapMax <= KlassEncodingMetaspaceMax &&
 511         (uint64_t)(aligned_heap_base_min_address + size + class_space) <= KlassEncodingMetaspaceMax) {
 512       zerobased_max = (char *)OopEncodingHeapMax - class_space;
 513     }
 514 
 515     // Give it several tries from top of range to bottom.
 516     if (aligned_heap_base_min_address + size <= zerobased_max &&    // Zerobased theoretically possible.
 517         ((_base == NULL) ||                        // No previous try succeeded.
 518          (_base + size > zerobased_max))) {        // Unscaled delivered an arbitrary address.
 519 
 520       // Calculate the address range within which we try to attach (range of possible start addresses).
 521       char *const highest_start = align_down(zerobased_max - size, attach_point_alignment);
 522       // Be careful: size is not guaranteed to be less than UnscaledOopHeapMax,
 523       // so the subtraction below may wrap around.
 524       char *lowest_start = aligned_heap_base_min_address;
 525       uint64_t unscaled_end = UnscaledOopHeapMax - size;
 526       if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
 527         lowest_start = MAX2(lowest_start, (char*)unscaled_end);
 528       }
 529       lowest_start = align_up(lowest_start, attach_point_alignment);
 530       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 531                         aligned_heap_base_min_address, zerobased_max, size, alignment, large);
 532     }
 533 
 534     // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
 535     // implement null checks.
 536     noaccess_prefix = noaccess_prefix_size(alignment);
 537 
 538     // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjoint base mode.
 539     char** addresses = get_attach_addresses_for_disjoint_mode();
 540     int i = 0;
 541     while (addresses[i] &&                                 // End of array not yet reached.
 542            ((_base == NULL) ||                             // No previous try succeeded.
 543             (_base + size >  (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
 544              !Universe::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
 545       char* const attach_point = addresses[i];
 546       assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
 547       try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
 548       i++;
 549     }
 550 
 551     // Last, a desperate try without any placement constraint.
 552     if (_base == NULL) {
 553       log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix);
 554       initialize(size + noaccess_prefix, alignment, large, NULL, false);
 555     }
 556   }
 557 }
 558 
 559 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large) : ReservedSpace() {
 560 
 561   if (size == 0) {
 562     return;
 563   }
 564 
 565   // Heap size should be aligned to alignment, too.
 566   guarantee(is_aligned(size, alignment), "set by caller");
 567 
 568   if (UseCompressedOops) {
 569     initialize_compressed_heap(size, alignment, large);
 570     if (_size > size) {
 571       // We allocated the heap with a noaccess prefix.
 572       // It can happen that we get a zerobased/unscaled heap with a noaccess prefix
 573       // if we had to try at an arbitrary address.
 574       establish_noaccess_prefix();
 575     }
 576   } else {
 577     initialize(size, alignment, large, NULL, false);
 578   }
 579 
 580   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
 581          "area must be distinguishable from marks for mark-sweep");
 582   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
 583          "area must be distinguishable from marks for mark-sweep");
 584 
 585   if (base() != NULL) {
 586     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
 587   }
 588 }
 589 
 590 // Reserve space for the code segment.  Same as the Java heap, only we mark this as
 591 // executable.
 592 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
 593                                      size_t rs_align,
 594                                      bool large) :
 595   ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
 596   MemTracker::record_virtual_memory_type((address)base(), mtCode);
 597 }
 598 
 599 // VirtualSpace
 600 
 601 VirtualSpace::VirtualSpace() {
 602   _low_boundary           = NULL;
 603   _high_boundary          = NULL;
 604   _low                    = NULL;
 605   _high                   = NULL;
 606   _lower_high             = NULL;
 607   _middle_high            = NULL;
 608   _upper_high             = NULL;
 609   _lower_high_boundary    = NULL;
 610   _middle_high_boundary   = NULL;
 611   _upper_high_boundary    = NULL;
 612   _lower_alignment        = 0;
 613   _middle_alignment       = 0;
 614   _upper_alignment        = 0;
 615   _special                = false;
 616   _executable             = false;
 617 }
 618 
 619 
 620 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
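       // Use the largest page size for which at least one page fits in the reservation
       // as the maximum commit granularity (this may be a large page size).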
 621   const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
 622   return initialize_with_granularity(rs, committed_size, max_commit_granularity);
 623 }
 624 
 625 bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
 626   if (!rs.is_reserved()) return false;  // allocation failed.
 627   assert(_low_boundary == NULL, "VirtualSpace already initialized");
 628   assert(max_commit_granularity > 0, "Granularity must be non-zero.");
 629 
 630   _low_boundary  = rs.base();
 631   _high_boundary = low_boundary() + rs.size();
 632 
 633   _low = low_boundary();
 634   _high = low();
 635 
 636   _special = rs.special();
 637   _executable = rs.executable();
 638 
 639   // When a VirtualSpace begins life at a large size, make all future expansion
 640   // and shrinking occur aligned to a granularity of large pages.  This avoids
 641   // fragmentation of physical addresses that inhibits the use of large pages
 642   // by the OS virtual memory system.  Empirically,  we see that with a 4MB
 643   // page size, the only spaces that get handled this way are codecache and
 644   // the heap itself, both of which provide a substantial performance
 645   // boost in many benchmarks when covered by large pages.
 646   //
 647   // No attempt is made to force large page alignment at the very top and
 648   // bottom of the space if they are not aligned so already.
 649   _lower_alignment  = os::vm_page_size();
 650   _middle_alignment = max_commit_granularity;
 651   _upper_alignment  = os::vm_page_size();
 652 
 653   // End of each region
 654   _lower_high_boundary = align_up(low_boundary(), middle_alignment());
 655   _middle_high_boundary = align_down(high_boundary(), middle_alignment());
 656   _upper_high_boundary = high_boundary();
 657 
 658   // High address of each region
 659   _lower_high = low_boundary();
 660   _middle_high = lower_high_boundary();
 661   _upper_high = middle_high_boundary();
 662 
 663   // commit to initial size
 664   if (committed_size > 0) {
 665     if (!expand_by(committed_size)) {
 666       return false;
 667     }
 668   }
 669   return true;
 670 }
 671 
 672 
 673 VirtualSpace::~VirtualSpace() {
 674   release();
 675 }
 676 
 677 
 678 void VirtualSpace::release() {
 679   // This does not release the memory that was reserved.
 680   // The caller must release it via rs.release().
 681   _low_boundary           = NULL;
 682   _high_boundary          = NULL;
 683   _low                    = NULL;
 684   _high                   = NULL;
 685   _lower_high             = NULL;
 686   _middle_high            = NULL;
 687   _upper_high             = NULL;
 688   _lower_high_boundary    = NULL;
 689   _middle_high_boundary   = NULL;
 690   _upper_high_boundary    = NULL;
 691   _lower_alignment        = 0;
 692   _middle_alignment       = 0;
 693   _upper_alignment        = 0;
 694   _special                = false;
 695   _executable             = false;
 696 }
 697 
 698 
 699 size_t VirtualSpace::committed_size() const {
 700   return pointer_delta(high(), low(), sizeof(char));
 701 }
 702 
 703 
 704 size_t VirtualSpace::reserved_size() const {
 705   return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
 706 }
 707 
 708 
 709 size_t VirtualSpace::uncommitted_size()  const {
 710   return reserved_size() - committed_size();
 711 }
 712 
 713 size_t VirtualSpace::actual_committed_size() const {
 714   // Special VirtualSpaces commit all reserved space up front.
 715   if (special()) {
 716     return reserved_size();
 717   }
 718 
 719   size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
 720   size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
 721   size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));
 722 
 723 #ifdef ASSERT
 724   size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
 725   size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
 726   size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));
 727 
 728   if (committed_high > 0) {
 729     assert(committed_low == lower, "Must be");
 730     assert(committed_middle == middle, "Must be");
 731   }
 732 
 733   if (committed_middle > 0) {
 734     assert(committed_low == lower, "Must be");
 735   }
 736   if (committed_middle < middle) {
 737     assert(committed_high == 0, "Must be");
 738   }
 739 
 740   if (committed_low < lower) {
 741     assert(committed_high == 0, "Must be");
 742     assert(committed_middle == 0, "Must be");
 743   }
 744 #endif
 745 
 746   return committed_low + committed_middle + committed_high;
 747 }
 748 
 749 
 750 bool VirtualSpace::contains(const void* p) const {
 751   return low() <= (const char*) p && (const char*) p < high();
 752 }
 753 
 754 static void pretouch_expanded_memory(void* start, void* end) {
 755   assert(is_aligned(start, os::vm_page_size()), "Unexpected alignment");
 756   assert(is_aligned(end,   os::vm_page_size()), "Unexpected alignment");
 757 
 758   os::pretouch_memory(start, end);
 759 }
 760 
 761 static bool commit_expanded(char* start, size_t size, size_t alignment, bool pre_touch, bool executable) {
 762   if (os::commit_memory(start, size, alignment, executable)) {
 763     if (pre_touch || AlwaysPreTouch) {
 764       pretouch_expanded_memory(start, start + size);
 765     }
 766     return true;
 767   }
 768 
 769   debug_only(warning(
 770       "INFO: os::commit_memory(" PTR_FORMAT ", " PTR_FORMAT
 771       " size=" SIZE_FORMAT ", executable=%d) failed",
 772       p2i(start), p2i(start + size), size, executable);)
 773 
 774   return false;
 775 }
 776 
 777 /*
 778    First we need to determine if a particular virtual space is using large
 779    pages.  This is done in the initialize function and only virtual spaces
 780    that are larger than LargePageSizeInBytes use large pages.  Once we
 781    have determined this, all expand_by and shrink_by calls must grow and
 782    shrink by large page size chunks.  If a particular request
 783    is within the current large page, the call to commit and uncommit memory
 784    can be ignored.  In the case that the low and high boundaries of this
 785    space are not large page aligned, the pages leading up to the first large
 786    page address and the pages after the last large page address must be
 787    allocated with default pages.
 788 */
 789 bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
 790   if (uncommitted_size() < bytes) {
 791     return false;
 792   }
 793 
 794   if (special()) {
 795     // don't commit memory if the entire space is pinned in memory
 796     _high += bytes;
 797     return true;
 798   }
 799 
 800   char* previous_high = high();
 801   char* unaligned_new_high = high() + bytes;
 802   assert(unaligned_new_high <= high_boundary(), "cannot expand by more than upper boundary");
 803 
 804   // Calculate where the new high for each of the regions should be.  If
 805   // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
 806   // then the unaligned lower and upper new highs would be the
 807   // lower_high() and upper_high() respectively.
 808   char* unaligned_lower_new_high =  MIN2(unaligned_new_high, lower_high_boundary());
 809   char* unaligned_middle_new_high = MIN2(unaligned_new_high, middle_high_boundary());
 810   char* unaligned_upper_new_high =  MIN2(unaligned_new_high, upper_high_boundary());
 811 
 812   // Align the new highs based on the region's alignment.  Lower and upper
 813   // alignment will always be default page size.  middle alignment will be
 814   // LargePageSizeInBytes if the actual size of the virtual space is in
 815   // fact larger than LargePageSizeInBytes.
 816   char* aligned_lower_new_high =  align_up(unaligned_lower_new_high, lower_alignment());
 817   char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
 818   char* aligned_upper_new_high =  align_up(unaligned_upper_new_high, upper_alignment());
 819 
 820   // Determine which regions need to grow in this expand_by call.
 821   // If you are growing in the lower region, high() must be in that
 822   // region so calculate the size based on high().  For the middle and
 823   // upper regions, determine the starting point of growth based on the
 824   // location of high().  By getting the MAX of the region's low address
 825   // (or the previous region's high address) and high(), we can tell if it
 826   // is an intra or inter region growth.
 827   size_t lower_needs = 0;
 828   if (aligned_lower_new_high > lower_high()) {
 829     lower_needs = pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
 830   }
 831   size_t middle_needs = 0;
 832   if (aligned_middle_new_high > middle_high()) {
 833     middle_needs = pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
 834   }
 835   size_t upper_needs = 0;
 836   if (aligned_upper_new_high > upper_high()) {
 837     upper_needs = pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
 838   }
 839 
 840   // Check contiguity.
 841   assert(low_boundary() <= lower_high() && lower_high() <= lower_high_boundary(),
 842          "high address must be contained within the region");
 843   assert(lower_high_boundary() <= middle_high() && middle_high() <= middle_high_boundary(),
 844          "high address must be contained within the region");
 845   assert(middle_high_boundary() <= upper_high() && upper_high() <= upper_high_boundary(),
 846          "high address must be contained within the region");
 847 
 848   // Commit regions
 849   if (lower_needs > 0) {
 850     assert(lower_high() + lower_needs <= lower_high_boundary(), "must not expand beyond region");
 851     if (!commit_expanded(lower_high(), lower_needs, _lower_alignment, pre_touch, _executable)) {
 852       return false;
 853     }
 854     _lower_high += lower_needs;
 855   }
 856 
 857   if (middle_needs > 0) {
 858     assert(middle_high() + middle_needs <= middle_high_boundary(), "must not expand beyond region");
 859     if (!commit_expanded(middle_high(), middle_needs, _middle_alignment, pre_touch, _executable)) {
 860       return false;
 861     }
 862     _middle_high += middle_needs;
 863   }
 864 
 865   if (upper_needs > 0) {
 866     assert(upper_high() + upper_needs <= upper_high_boundary(), "must not expand beyond region");
 867     if (!commit_expanded(upper_high(), upper_needs, _upper_alignment, pre_touch, _executable)) {
 868       return false;
 869     }
 870     _upper_high += upper_needs;
 871   }
 872 
 873   _high += bytes;
 874   return true;
 875 }
 876 
 877 // A page is uncommitted if the contents of the entire page are deemed unusable.
 878 // Continue to decrement the high() pointer until it reaches a page boundary,
 879 // at which point that particular page can be uncommitted.
 880 void VirtualSpace::shrink_by(size_t size) {
 881   if (committed_size() < size)
 882     fatal("Cannot shrink virtual space to negative size");
 883 
 884   if (special()) {
 885     // don't uncommit if the entire space is pinned in memory
 886     _high -= size;
 887     return;
 888   }
 889 
 890   char* unaligned_new_high = high() - size;
 891   assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");
 892 
 893   // Calculate the new unaligned high for each region (clamped to that region's lower bound)
 894   char* unaligned_upper_new_high =
 895     MAX2(unaligned_new_high, middle_high_boundary());
 896   char* unaligned_middle_new_high =
 897     MAX2(unaligned_new_high, lower_high_boundary());
 898   char* unaligned_lower_new_high =
 899     MAX2(unaligned_new_high, low_boundary());
 900 
 901   // Align address to region's alignment
 902   char* aligned_upper_new_high =  align_up(unaligned_upper_new_high, upper_alignment());
 903   char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
 904   char* aligned_lower_new_high =  align_up(unaligned_lower_new_high, lower_alignment());
 905 
 906   // Determine which regions need to shrink
 907   size_t upper_needs = 0;
 908   if (aligned_upper_new_high < upper_high()) {
 909     upper_needs =
 910       pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
 911   }
 912   size_t middle_needs = 0;
 913   if (aligned_middle_new_high < middle_high()) {
 914     middle_needs =
 915       pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
 916   }
 917   size_t lower_needs = 0;
 918   if (aligned_lower_new_high < lower_high()) {
 919     lower_needs =
 920       pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
 921   }
 922 
 923   // Check contiguity.
 924   assert(middle_high_boundary() <= upper_high() &&
 925          upper_high() <= upper_high_boundary(),
 926          "high address must be contained within the region");
 927   assert(lower_high_boundary() <= middle_high() &&
 928          middle_high() <= middle_high_boundary(),
 929          "high address must be contained within the region");
 930   assert(low_boundary() <= lower_high() &&
 931          lower_high() <= lower_high_boundary(),
 932          "high address must be contained within the region");
 933 
 934   // Uncommit
 935   if (upper_needs > 0) {
 936     assert(middle_high_boundary() <= aligned_upper_new_high &&
 937            aligned_upper_new_high + upper_needs <= upper_high_boundary(),
 938            "must not shrink beyond region");
 939     if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
 940       debug_only(warning("os::uncommit_memory failed"));
 941       return;
 942     } else {
 943       _upper_high -= upper_needs;
 944     }
 945   }
 946   if (middle_needs > 0) {
 947     assert(lower_high_boundary() <= aligned_middle_new_high &&
 948            aligned_middle_new_high + middle_needs <= middle_high_boundary(),
 949            "must not shrink beyond region");
 950     if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
 951       debug_only(warning("os::uncommit_memory failed"));
 952       return;
 953     } else {
 954       _middle_high -= middle_needs;
 955     }
 956   }
 957   if (lower_needs > 0) {
 958     assert(low_boundary() <= aligned_lower_new_high &&
 959            aligned_lower_new_high + lower_needs <= lower_high_boundary(),
 960            "must not shrink beyond region");
 961     if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
 962       debug_only(warning("os::uncommit_memory failed"));
 963       return;
 964     } else {
 965       _lower_high -= lower_needs;
 966     }
 967   }
 968 
 969   _high -= size;
 970 }
 971 
 972 #ifndef PRODUCT
 973 void VirtualSpace::check_for_contiguity() {
 974   // Check contiguity.
 975   assert(low_boundary() <= lower_high() &&
 976          lower_high() <= lower_high_boundary(),
 977          "high address must be contained within the region");
 978   assert(lower_high_boundary() <= middle_high() &&
 979          middle_high() <= middle_high_boundary(),
 980          "high address must be contained within the region");
 981   assert(middle_high_boundary() <= upper_high() &&
 982          upper_high() <= upper_high_boundary(),
 983          "high address must be contained within the region");
 984   assert(low() >= low_boundary(), "low");
 985   assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
 986   assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
 987   assert(high() <= upper_high(), "upper high");
 988 }
 989 
 990 void VirtualSpace::print_on(outputStream* out) {
 991   out->print   ("Virtual space:");
 992   if (special()) out->print(" (pinned in memory)");
 993   out->cr();
 994   out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
 995   out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
 996   out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low()), p2i(high()));
 997   out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low_boundary()), p2i(high_boundary()));
 998 }
 999 
1000 void VirtualSpace::print() {
1001   print_on(tty);
1002 }
1003 
1004 /////////////// Unit tests ///////////////
1005 
1006 #ifndef PRODUCT
1007 
1008 #define test_log(...) \
1009   do {\
1010     if (VerboseInternalVMTests) { \
1011       tty->print_cr(__VA_ARGS__); \
1012       tty->flush(); \
1013     }\
1014   } while (false)
1015 
1016 class TestReservedSpace : AllStatic {
1017  public:
1018   static void small_page_write(void* addr, size_t size) {
1019     size_t page_size = os::vm_page_size();
1020 
1021     char* end = (char*)addr + size;
1022     for (char* p = (char*)addr; p < end; p += page_size) {
1023       *p = 1;
1024     }
1025   }
1026 
1027   static void release_memory_for_test(ReservedSpace rs) {
1028     if (rs.special()) {
1029       guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
1030     } else {
1031       guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
1032     }
1033   }
1034 
1035   static void test_reserved_space1(size_t size, size_t alignment) {
1036     test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
1037 
1038     assert(is_aligned(size, alignment), "Incorrect input parameters");
1039 
1040     ReservedSpace rs(size,          // size
1041                      alignment,     // alignment
1042                      UseLargePages, // large
1043                      (char *)NULL); // requested_address
1044 
1045     test_log(" rs.special() == %d", rs.special());
1046 
1047     assert(rs.base() != NULL, "Must be");
1048     assert(rs.size() == size, "Must be");
1049 
1050     assert(is_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
1051     assert(is_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
1052 
1053     if (rs.special()) {
1054       small_page_write(rs.base(), size);
1055     }
1056 
1057     release_memory_for_test(rs);
1058   }
1059 
1060   static void test_reserved_space2(size_t size) {
1061     test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
1062 
1063     assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1064 
1065     ReservedSpace rs(size);
1066 
1067     test_log(" rs.special() == %d", rs.special());
1068 
1069     assert(rs.base() != NULL, "Must be");
1070     assert(rs.size() == size, "Must be");
1071 
1072     if (rs.special()) {
1073       small_page_write(rs.base(), size);
1074     }
1075 
1076     release_memory_for_test(rs);
1077   }
1078 
1079   static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
1080     test_log("test_reserved_space3(%p, %p, %d)",
1081         (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);
1082 
1083     if (size < alignment) {
1084       // Tests might set -XX:LargePageSizeInBytes=<small pages> and cause unexpected input arguments for this test.
1085       assert((size_t)os::vm_page_size() == os::large_page_size(), "Test needs further refinement");
1086       return;
1087     }
1088 
1089     assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1090     assert(is_aligned(size, alignment), "Must be at least aligned against alignment");
1091 
1092     bool large = maybe_large && UseLargePages && size >= os::large_page_size();
1093 
1094     ReservedSpace rs(size, alignment, large, false);
1095 
1096     test_log(" rs.special() == %d", rs.special());
1097 
1098     assert(rs.base() != NULL, "Must be");
1099     assert(rs.size() == size, "Must be");
1100 
1101     if (rs.special()) {
1102       small_page_write(rs.base(), size);
1103     }
1104 
1105     release_memory_for_test(rs);
1106   }
1107 
1108 
1109   static void test_reserved_space1() {
1110     size_t size = 2 * 1024 * 1024;
1111     size_t ag   = os::vm_allocation_granularity();
1112 
1113     test_reserved_space1(size,      ag);
1114     test_reserved_space1(size * 2,  ag);
1115     test_reserved_space1(size * 10, ag);
1116   }
1117 
1118   static void test_reserved_space2() {
1119     size_t size = 2 * 1024 * 1024;
1120     size_t ag = os::vm_allocation_granularity();
1121 
1122     test_reserved_space2(size * 1);
1123     test_reserved_space2(size * 2);
1124     test_reserved_space2(size * 10);
1125     test_reserved_space2(ag);
1126     test_reserved_space2(size - ag);
1127     test_reserved_space2(size);
1128     test_reserved_space2(size + ag);
1129     test_reserved_space2(size * 2);
1130     test_reserved_space2(size * 2 - ag);
1131     test_reserved_space2(size * 2 + ag);
1132     test_reserved_space2(size * 3);
1133     test_reserved_space2(size * 3 - ag);
1134     test_reserved_space2(size * 3 + ag);
1135     test_reserved_space2(size * 10);
1136     test_reserved_space2(size * 10 + size / 2);
1137   }
1138 
1139   static void test_reserved_space3() {
1140     size_t ag = os::vm_allocation_granularity();
1141 
1142     test_reserved_space3(ag,      ag    , false);
1143     test_reserved_space3(ag * 2,  ag    , false);
1144     test_reserved_space3(ag * 3,  ag    , false);
1145     test_reserved_space3(ag * 2,  ag * 2, false);
1146     test_reserved_space3(ag * 4,  ag * 2, false);
1147     test_reserved_space3(ag * 8,  ag * 2, false);
1148     test_reserved_space3(ag * 4,  ag * 4, false);
1149     test_reserved_space3(ag * 8,  ag * 4, false);
1150     test_reserved_space3(ag * 16, ag * 4, false);
1151 
1152     if (UseLargePages) {
1153       size_t lp = os::large_page_size();
1154 
1155       // Without large pages
1156       test_reserved_space3(lp,     ag * 4, false);
1157       test_reserved_space3(lp * 2, ag * 4, false);
1158       test_reserved_space3(lp * 4, ag * 4, false);
1159       test_reserved_space3(lp,     lp    , false);
1160       test_reserved_space3(lp * 2, lp    , false);
1161       test_reserved_space3(lp * 3, lp    , false);
1162       test_reserved_space3(lp * 2, lp * 2, false);
1163       test_reserved_space3(lp * 4, lp * 2, false);
1164       test_reserved_space3(lp * 8, lp * 2, false);
1165 
1166       // With large pages
1167       test_reserved_space3(lp, ag * 4    , true);
1168       test_reserved_space3(lp * 2, ag * 4, true);
1169       test_reserved_space3(lp * 4, ag * 4, true);
1170       test_reserved_space3(lp, lp        , true);
1171       test_reserved_space3(lp * 2, lp    , true);
1172       test_reserved_space3(lp * 3, lp    , true);
1173       test_reserved_space3(lp * 2, lp * 2, true);
1174       test_reserved_space3(lp * 4, lp * 2, true);
1175       test_reserved_space3(lp * 8, lp * 2, true);
1176     }
1177   }
1178 
1179   static void test_reserved_space() {
1180     test_reserved_space1();
1181     test_reserved_space2();
1182     test_reserved_space3();
1183   }
1184 };
1185 
1186 void TestReservedSpace_test() {
1187   TestReservedSpace::test_reserved_space();
1188 }
1189 
1190 #define assert_equals(actual, expected)  \
1191   assert(actual == expected,             \
1192          "Got " SIZE_FORMAT " expected " \
1193          SIZE_FORMAT, actual, expected);
1194 
1195 #define assert_ge(value1, value2)                  \
1196   assert(value1 >= value2,                         \
1197          "'" #value1 "': " SIZE_FORMAT " '"        \
1198          #value2 "': " SIZE_FORMAT, value1, value2);
1199 
1200 #define assert_lt(value1, value2)                  \
1201   assert(value1 < value2,                          \
1202          "'" #value1 "': " SIZE_FORMAT " '"        \
1203          #value2 "': " SIZE_FORMAT, value1, value2);
1204 
1205 
1206 class TestVirtualSpace : AllStatic {
1207   enum TestLargePages {
1208     Default,
1209     Disable,
1210     Reserve,
1211     Commit
1212   };
1213 
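       // Default and Reserve use the plain ReservedSpace(size) constructor, which may pick
       // large pages; Disable and Commit reserve with small pages and differ only in the
       // commit granularity passed to initialize_with_granularity() below.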
1214   static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
1215     switch(mode) {
1216     default:
1217     case Default:
1218     case Reserve:
1219       return ReservedSpace(reserve_size_aligned);
1220     case Disable:
1221     case Commit:
1222       return ReservedSpace(reserve_size_aligned,
1223                            os::vm_allocation_granularity(),
1224                            /* large */ false, /* exec */ false);
1225     }
1226   }
1227 
1228   static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
1229     switch(mode) {
1230     default:
1231     case Default:
1232     case Reserve:
1233       return vs.initialize(rs, 0);
1234     case Disable:
1235       return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
1236     case Commit:
1237       return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
1238     }
1239   }
1240 
1241  public:
1242   static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
1243                                                         TestLargePages mode = Default) {
1244     size_t granularity = os::vm_allocation_granularity();
1245     size_t reserve_size_aligned = align_up(reserve_size, granularity);
1246 
1247     ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
1248 
1249     assert(reserved.is_reserved(), "Must be");
1250 
1251     VirtualSpace vs;
1252     bool initialized = initialize_virtual_space(vs, reserved, mode);
1253     assert(initialized, "Failed to initialize VirtualSpace");
1254 
1255     vs.expand_by(commit_size, false);
1256 
1257     if (vs.special()) {
1258       assert_equals(vs.actual_committed_size(), reserve_size_aligned);
1259     } else {
1260       assert_ge(vs.actual_committed_size(), commit_size);
1261       // Approximate the commit granularity.
1262       // Make sure that we don't commit using large pages
1263       // if large pages have been disabled for this VirtualSpace.
1264       size_t commit_granularity = (mode == Disable || !UseLargePages) ?
1265                                    os::vm_page_size() : os::large_page_size();
1266       assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
1267     }
1268 
1269     reserved.release();
1270   }
1271 
1272   static void test_virtual_space_actual_committed_space_one_large_page() {
1273     if (!UseLargePages) {
1274       return;
1275     }
1276 
1277     size_t large_page_size = os::large_page_size();
1278 
1279     ReservedSpace reserved(large_page_size, large_page_size, true, false);
1280 
1281     assert(reserved.is_reserved(), "Must be");
1282 
1283     VirtualSpace vs;
1284     bool initialized = vs.initialize(reserved, 0);
1285     assert(initialized, "Failed to initialize VirtualSpace");
1286 
1287     vs.expand_by(large_page_size, false);
1288 
1289     assert_equals(vs.actual_committed_size(), large_page_size);
1290 
1291     reserved.release();
1292   }
1293 
1294   static void test_virtual_space_actual_committed_space() {
1295     test_virtual_space_actual_committed_space(4 * K, 0);
1296     test_virtual_space_actual_committed_space(4 * K, 4 * K);
1297     test_virtual_space_actual_committed_space(8 * K, 0);
1298     test_virtual_space_actual_committed_space(8 * K, 4 * K);
1299     test_virtual_space_actual_committed_space(8 * K, 8 * K);
1300     test_virtual_space_actual_committed_space(12 * K, 0);
1301     test_virtual_space_actual_committed_space(12 * K, 4 * K);
1302     test_virtual_space_actual_committed_space(12 * K, 8 * K);
1303     test_virtual_space_actual_committed_space(12 * K, 12 * K);
1304     test_virtual_space_actual_committed_space(64 * K, 0);
1305     test_virtual_space_actual_committed_space(64 * K, 32 * K);
1306     test_virtual_space_actual_committed_space(64 * K, 64 * K);
1307     test_virtual_space_actual_committed_space(2 * M, 0);
1308     test_virtual_space_actual_committed_space(2 * M, 4 * K);
1309     test_virtual_space_actual_committed_space(2 * M, 64 * K);
1310     test_virtual_space_actual_committed_space(2 * M, 1 * M);
1311     test_virtual_space_actual_committed_space(2 * M, 2 * M);
1312     test_virtual_space_actual_committed_space(10 * M, 0);
1313     test_virtual_space_actual_committed_space(10 * M, 4 * K);
1314     test_virtual_space_actual_committed_space(10 * M, 8 * K);
1315     test_virtual_space_actual_committed_space(10 * M, 1 * M);
1316     test_virtual_space_actual_committed_space(10 * M, 2 * M);
1317     test_virtual_space_actual_committed_space(10 * M, 5 * M);
1318     test_virtual_space_actual_committed_space(10 * M, 10 * M);
1319   }
1320 
1321   static void test_virtual_space_disable_large_pages() {
1322     if (!UseLargePages) {
1323       return;
1324     }
1325     // These test cases verify that committing works when we force VirtualSpace to disable large pages, and exercise the other granularity modes.
1326     test_virtual_space_actual_committed_space(10 * M, 0, Disable);
1327     test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
1328     test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
1329     test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
1330     test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
1331     test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
1332     test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);
1333 
1334     test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
1335     test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
1336     test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
1337     test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
1338     test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
1339     test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
1340     test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);
1341 
1342     test_virtual_space_actual_committed_space(10 * M, 0, Commit);
1343     test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
1344     test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
1345     test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
1346     test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
1347     test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
1348     test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
1349   }
1350 
1351   static void test_virtual_space() {
1352     test_virtual_space_actual_committed_space();
1353     test_virtual_space_actual_committed_space_one_large_page();
1354     test_virtual_space_disable_large_pages();
1355   }
1356 };
1357 
1358 void TestVirtualSpace_test() {
1359   TestVirtualSpace::test_virtual_space();
1360 }
1361 
1362 #endif // PRODUCT
1363 
1364 #endif