   1 /*
   2  * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "oops/markOop.hpp"
  27 #include "oops/oop.inline.hpp"
  28 #include "runtime/virtualspace.hpp"
  29 #include "services/memTracker.hpp"
  30 
  31 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
  32 
  33 // ReservedSpace
  34 
  35 // Dummy constructor
  36 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
  37     _alignment(0), _special(false), _executable(false) {
  38 }
  39 
  40 ReservedSpace::ReservedSpace(size_t size) {
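       // os::page_size_for_region(size, 1) picks the largest enabled page size
       // for which at least one page fits in 'size', falling back to the default
       // page size otherwise; a result larger than vm_page_size() therefore means
       // large pages are in play for this reservation.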
  41   size_t page_size = os::page_size_for_region(size, 1);
  42   bool large_pages = page_size != (size_t)os::vm_page_size();
  43   // Don't force the alignment to be large page aligned,
  44   // since that will waste memory.
  45   size_t alignment = os::vm_allocation_granularity();
  46   initialize(size, alignment, large_pages, NULL, false);
  47 }
  48 
  49 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  50                              bool large,
  51                              char* requested_address) {
  52   initialize(size, alignment, large, requested_address, false);
  53 }
  54 
  55 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  56                              bool large,
  57                              bool executable) {
  58   initialize(size, alignment, large, NULL, executable);
  59 }
  60 
  61 // Helper method: returns true if a specific address was requested but the reservation did not land there.
  62 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
  63                                            const size_t size, bool special)
  64 {
  65   if (base == requested_address || requested_address == NULL)
  66     return false; // did not fail
  67 
  68   if (base != NULL) {
  69     // A different reserve address may be acceptable in other cases,
  70     // but for compressed oops the heap should be at the requested address.
  71     assert(UseCompressedOops, "currently requested address used only for compressed oops");
  72     if (PrintCompressedOopsMode) {
  73       tty->cr();
  74       tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
  75     }
  76     // OS ignored requested address. Try different address.
  77     if (special) {
  78       if (!os::release_memory_special(base, size)) {
  79         fatal("os::release_memory_special failed");
  80       }
  81     } else {
  82       if (!os::release_memory(base, size)) {
  83         fatal("os::release_memory failed");
  84       }
  85     }
  86   }
  87   return true;
  88 }
  89 
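     // Reservation strategy (a summary of the code below): pinned large pages via
     // os::reserve_memory_special() when required; otherwise an attempt at the
     // requested address, if one was given; otherwise a plain reservation.  If the
     // result is not aligned to 'alignment', it is released and re-reserved with
     // os::reserve_memory_aligned().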
  90 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
  91                                char* requested_address,
  92                                bool executable) {
  93   const size_t granularity = os::vm_allocation_granularity();
  94   assert((size & (granularity - 1)) == 0,
  95          "size not aligned to os::vm_allocation_granularity()");
  96   assert((alignment & (granularity - 1)) == 0,
  97          "alignment not aligned to os::vm_allocation_granularity()");
  98   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
  99          "not a power of 2");
 100 
 101   alignment = MAX2(alignment, (size_t)os::vm_page_size());
 102 
 103   _base = NULL;
 104   _size = 0;
 105   _special = false;
 106   _executable = executable;
 107   _alignment = 0;
 108   _noaccess_prefix = 0;
 109   if (size == 0) {
 110     return;
 111   }
 112 
 113   // If the OS doesn't support demand paging for large page memory, we need
 114   // to use reserve_memory_special() to reserve and pin the entire region.
 115   bool special = large && !os::can_commit_large_page_memory();
 116   char* base = NULL;
 117 
 118   if (special) {
 119 
 120     base = os::reserve_memory_special(size, alignment, requested_address, executable);
 121 
 122     if (base != NULL) {
 123       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
 124         // OS ignored requested address. Try different address.
 125         return;
 126       }
 127       // Check alignment constraints.
 128       assert((uintptr_t) base % alignment == 0,
 129              err_msg("Large pages returned a non-aligned address, base: "
 130                  PTR_FORMAT " alignment: " PTR_FORMAT,
 131                  base, (void*)(uintptr_t)alignment));
 132       _special = true;
 133     } else {
 134       // failed; try to reserve regular memory below
 135       if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 136                             !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 137         if (PrintCompressedOopsMode) {
 138           tty->cr();
 139           tty->print_cr("Reserve regular memory without large pages.");
 140         }
 141       }
 142     }
 143   }
 144 
 145   if (base == NULL) {
 146     // Optimistically assume that the OS returns an aligned base pointer.
 147     // When reserving a large address range, most OSes seem to align to at
 148     // least 64K.
 149 
 150     // If the memory was requested at a particular address, use
 151     // os::attempt_reserve_memory_at() to avoid mapping over something
 152     // important.  If available space is not detected, return NULL.
 153 
 154     if (requested_address != 0) {
 155       base = os::attempt_reserve_memory_at(size, requested_address);
 156       if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
 157         // OS ignored requested address. Try different address.
 158         base = NULL;
 159       }
 160     } else {
 161       base = os::reserve_memory(size, NULL, alignment);
 162     }
 163 
 164     if (base == NULL) return;
 165 
 166     // Check alignment constraints
 167     if ((((size_t)base) & (alignment - 1)) != 0) {
 168       // Base not aligned, retry
 169       if (!os::release_memory(base, size)) fatal("os::release_memory failed");
 170       // Make sure that size is aligned
 171       size = align_size_up(size, alignment);
 172       base = os::reserve_memory_aligned(size, alignment);
 173 
 174       if (requested_address != 0 &&
 175           failed_to_reserve_as_requested(base, requested_address, size, false)) {
 176         // As a result of the alignment constraints, the allocated base differs
 177         // from the requested address. Return to the caller, who can
 178         // take remedial action (like try again without a requested address).
 179         assert(_base == NULL, "should be");
 180         return;
 181       }
 182     }
 183   }
 184   // Done
 185   _base = base;
 186   _size = size;
 187   _alignment = alignment;
 188 }
 189 
 190 
 191 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
 192                              bool special, bool executable) {
 193   assert((size % os::vm_allocation_granularity()) == 0,
 194          "size not allocation aligned");
 195   _base = base;
 196   _size = size;
 197   _alignment = alignment;
 198   _noaccess_prefix = 0;
 199   _special = special;
 200   _executable = executable;
 201 }
 202 
 203 
 204 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
 205                                         bool split, bool realloc) {
 206   assert(partition_size <= size(), "partition failed");
 207   if (split) {
 208     os::split_reserved_memory(base(), size(), partition_size, realloc);
 209   }
 210   ReservedSpace result(base(), partition_size, alignment, special(),
 211                        executable());
 212   return result;
 213 }
 214 
 215 
 216 ReservedSpace
 217 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
 218   assert(partition_size <= size(), "partition failed");
 219   ReservedSpace result(base() + partition_size, size() - partition_size,
 220                        alignment, special(), executable());
 221   return result;
 222 }
 223 
 224 
 225 size_t ReservedSpace::page_align_size_up(size_t size) {
 226   return align_size_up(size, os::vm_page_size());
 227 }
 228 
 229 
 230 size_t ReservedSpace::page_align_size_down(size_t size) {
 231   return align_size_down(size, os::vm_page_size());
 232 }
 233 
 234 
 235 size_t ReservedSpace::allocation_align_size_up(size_t size) {
 236   return align_size_up(size, os::vm_allocation_granularity());
 237 }
 238 
 239 
 240 size_t ReservedSpace::allocation_align_size_down(size_t size) {
 241   return align_size_down(size, os::vm_allocation_granularity());
 242 }
 243 
 244 
 245 void ReservedSpace::release() {
 246   if (is_reserved()) {
 247     char *real_base = _base - _noaccess_prefix;
 248     const size_t real_size = _size + _noaccess_prefix;
 249     if (special()) {
 250       os::release_memory_special(real_base, real_size);
 251     } else {
 252       os::release_memory(real_base, real_size);
 253     }
 254     _base = NULL;
 255     _size = 0;
 256     _noaccess_prefix = 0;
 257     _alignment = 0;
 258     _special = false;
 259     _executable = false;
 260   }
 261 }
 262 
 263 static size_t noaccess_prefix_size(size_t alignment) {
 264   return lcm(os::vm_page_size(), alignment);
 265 }
 266 
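     // The prefix is sized as lcm(page size, alignment) so that it can be
     // protected page-wise and so that the heap base following it stays aligned.
     // Rough layout after establish_noaccess_prefix() (a sketch, not to scale):
     //
     //   real base                  _base                        _base + _size
     //   |--- noaccess prefix ------|---------- usable heap ----------|
     //        (protected)
     //
     // Decoding a narrow oop of 0 against the heap base then lands in the
     // protected prefix, which is what makes implicit null checks work.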
 267 void ReservedHeapSpace::establish_noaccess_prefix() {
 268   assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
 269   _noaccess_prefix = noaccess_prefix_size(_alignment);
 270 
 271   if (base() && base() + _size > (char *)OopEncodingHeapMax) {
 272     if (true
 273         WIN64_ONLY(&& !UseLargePages)
 274         AIX_ONLY(&& os::vm_page_size() != SIZE_64K)) {
 275       // Protect memory at the base of the allocated region.
 276       // If special, the page was committed (this only matters on Windows).
 277       if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
 278         fatal("cannot protect protection page");
 279       }
 280       if (PrintCompressedOopsMode) {
 281         tty->cr();
 282         tty->print_cr("Protected page at the reserved heap base: "
 283                       PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
 284       }
 285       assert(Universe::narrow_oop_use_implicit_null_checks() == true, "not initialized?");
 286     } else {
 287       Universe::set_narrow_oop_use_implicit_null_checks(false);
 288     }
 289   }
 290 
 291   _base += _noaccess_prefix;
 292   _size -= _noaccess_prefix;
 293   assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
 294 }
 295 
 296 // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
 297 // Does not check whether the reserved memory actually is at requested_address, as the memory returned
 298 // might still fulfill the wishes of the caller.
 299 // Ensures the memory is aligned to 'alignment'.
 300 // NOTE: If ReservedHeapSpace already points to some reserved memory, this is freed first.
 301 void ReservedHeapSpace::try_reserve_heap(size_t size,
 302                                          size_t alignment,
 303                                          bool large,
 304                                          char* requested_address) {
 305   if (_base != NULL) {
 306     // We tried before, but we didn't like the address delivered.
 307     release();
 308   }
 309 
 310   // If the OS doesn't support demand paging for large page memory, we need
 311   // to use reserve_memory_special() to reserve and pin the entire region.
 312   bool special = large && !os::can_commit_large_page_memory();
 313   char* base = NULL;
 314 
 315   if (PrintCompressedOopsMode && Verbose) {
 316     tty->print("Trying to allocate at address " PTR_FORMAT " heap of size " PTR_FORMAT ".\n",
 317                requested_address, (address)size);
 318   }
 319 
 320   if (special) {
 321     base = os::reserve_memory_special(size, alignment, requested_address, false);
 322 
 323     if (base != NULL) {
 324       // Check alignment constraints.
 325       assert((uintptr_t) base % alignment == 0,
 326              err_msg("Large pages returned a non-aligned address, base: "
 327                      PTR_FORMAT " alignment: " PTR_FORMAT,
 328                      base, (void*)(uintptr_t)alignment));
 329       _special = true;
 330     }
 331   }
 332 
 333   if (base == NULL) {
 334     // Failed; try to reserve regular memory below
 335     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 336                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 337       if (PrintCompressedOopsMode) {
 338         tty->cr();
 339         tty->print_cr("Reserve regular memory without large pages.");
 340       }
 341     }
 342 
 343     // Optimistically assume that the OS returns an aligned base pointer.
 344     // When reserving a large address range, most OSes seem to align to at
 345     // least 64K.
 346 
 347     // If the memory was requested at a particular address, use
 348     // os::attempt_reserve_memory_at() to avoid mapping over something
 349     // important.  If available space is not detected, return NULL.
 350 
 351     if (requested_address != 0) {
 352       base = os::attempt_reserve_memory_at(size, requested_address);
 353     } else {
 354       base = os::reserve_memory(size, NULL, alignment);
 355     }
 356   }
 357   if (base == NULL) { return; }
 358 
 359   // Done
 360   _base = base;
 361   _size = size;
 362   _alignment = alignment;
 363 
 364   // Check alignment constraints
 365   if ((((size_t)base) & (alignment - 1)) != 0) {
 366     // Base not aligned, retry.
 367     release();
 368   }
 369 }
 370 
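     // Walks candidate attach points from highest_start down to lowest_start in
     // steps of 'stepsize', stopping early once a reservation lands inside
     // [aligned_heap_base_min_address, upper_bound).  For example (a sketch),
     // with HeapSearchSteps == 3 and an attach range of 6G, successive probes
     // are spaced roughly 2G apart.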
 371 void ReservedHeapSpace::try_reserve_range(char *highest_start,
 372                                           char *lowest_start,
 373                                           size_t attach_point_alignment,
 374                                           char *aligned_heap_base_min_address,
 375                                           char *upper_bound,
 376                                           size_t size,
 377                                           size_t alignment,
 378                                           bool large) {
 379   const size_t attach_range = highest_start - lowest_start;
 380   // Cap the number of attempts at the number actually possible;
 381   // at least one attempt is possible even for a zero-sized attach range.
 382   const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
 383   const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);
 384 
 385   const size_t stepsize = (attach_range == 0) ? // Only one try.
 386     (size_t) highest_start : align_size_up(attach_range / num_attempts_to_try, attach_point_alignment);
 387 
 388   // Try attach points from top to bottom.
 389   char* attach_point = highest_start;
 390   while (attach_point >= lowest_start  &&
 391          attach_point <= highest_start &&  // Avoid wrap around.
 392          ((_base == NULL) ||
 393           (_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
 394     try_reserve_heap(size, alignment, large, attach_point);
 395     attach_point -= stepsize;
 396   }
 397 }
 398 
 399 #define SIZE_64K  ((uint64_t) UCONST64(      0x10000))
 400 #define SIZE_256M ((uint64_t) UCONST64(   0x10000000))
 401 #define SIZE_32G  ((uint64_t) UCONST64(  0x800000000))
 402 
 403 // Helper for heap allocation. Returns an array with addresses
 404 // (OS-specific) which are suited for disjoint base mode. Array is
 405 // NULL terminated.
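     // In disjoint base mode the heap base is a multiple of OopEncodingHeapMax,
     // so (with the usual encoding) the bits of the base and of the shifted
     // narrow oop do not overlap and decoding can combine them cheaply.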
 406 static char** get_attach_addresses_for_disjoint_mode() {
 407   static uint64_t addresses[] = {
 408      2 * SIZE_32G,
 409      3 * SIZE_32G,
 410      4 * SIZE_32G,
 411      8 * SIZE_32G,
 412     10 * SIZE_32G,
 413      1 * SIZE_64K * SIZE_32G,
 414      2 * SIZE_64K * SIZE_32G,
 415      3 * SIZE_64K * SIZE_32G,
 416      4 * SIZE_64K * SIZE_32G,
 417     16 * SIZE_64K * SIZE_32G,
 418     32 * SIZE_64K * SIZE_32G,
 419     34 * SIZE_64K * SIZE_32G,
 420     0
 421   };
 422 
 423   // Skip addresses below OopEncodingHeapMax or HeapBaseMinAddress. This assumes
 424   // the array is sorted in ascending order.
 425   uint i = 0;
 426   while (addresses[i] != 0 &&
 427          (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
 428     i++;
 429   }
 430   uint start = i;
 431 
 432   // Avoid more steps than requested.
 433   i = 0;
 434   while (addresses[start+i] != 0) {
 435     if (i == HeapSearchSteps) {
 436       addresses[start+i] = 0;
 437       break;
 438     }
 439     i++;
 440   }
 441 
 442   return (char**) &addresses[start];
 443 }
 444 
 445 void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, bool large) {
 446   guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
 447             "can not allocate compressed oop heap for this size");
 448   guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
 449   assert(HeapBaseMinAddress > 0, "sanity");
 450 
 451   const size_t granularity = os::vm_allocation_granularity();
 452   assert((size & (granularity - 1)) == 0,
 453          "size not aligned to os::vm_allocation_granularity()");
 454   assert((alignment & (granularity - 1)) == 0,
 455          "alignment not aligned to os::vm_allocation_granularity()");
 456   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 457          "not a power of 2");
 458 
 459   // The necessary attach point alignment for generated wish addresses.
 460   // This is needed to increase the chance of attaching for mmap and shmat.
 461   const size_t os_attach_point_alignment =
 462     AIX_ONLY(SIZE_256M)  // Known shm boundary alignment.
 463     NOT_AIX(os::vm_allocation_granularity());
 464   const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
 465 
 466   char *aligned_heap_base_min_address = (char *)align_ptr_up((void *)HeapBaseMinAddress, alignment);
 467   size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
 468     noaccess_prefix_size(alignment) : 0;
 469 
 470   // Attempt to allocate at the user-given address.
 471   if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
 472     try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_heap_base_min_address);
 473     if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
 474       release();
 475     }
 476   }
 477 
 478   // Keep heap at HeapBaseMinAddress.
 479   if (_base == NULL) {
 480 
 481     // Try to allocate the heap at addresses that allow efficient oop compression.
 482     // Different schemes are tried, in order of decreasing optimization potential.
 483     //
 484     // For this, try_reserve_heap() is called with the desired heap base addresses.
 485     // A call into the os layer to allocate at a given address can return memory
 486     // at a different address than requested.  Still, this might be memory at a useful
 487     // address. try_reserve_heap() always returns this allocated memory, as only here
 488     // are the criteria for a good heap checked.
 489 
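         // Modes attempted below, in order of decreasing benefit (a summary):
         //   1. Unscaled:      heap end <= UnscaledOopHeapMax (4G); a narrow oop is the address.
         //   2. Zerobased:     heap end <= OopEncodingHeapMax; decode is just (narrow oop << shift).
         //   3. Disjoint base: heap base a multiple of OopEncodingHeapMax; base and shifted oop bits don't overlap.
         //   4. Anywhere:      regular base + (narrow oop << shift) decoding.
         // Modes 3 and 4 need a noaccess prefix for implicit null checks.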
 490     // Attempt to allocate so that we can run without base and scale (32-bit unscaled compressed oops).
 491     // Give it several tries from top of range to bottom.
 492     if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {
 493 
 494       // Calculate the address range within which we try to attach (range of possible start addresses).
 495       char* const highest_start = (char *)align_ptr_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
 496       char* const lowest_start  = (char *)align_ptr_up(aligned_heap_base_min_address, attach_point_alignment);
 497       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 498                         aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
 499     }
 500 
 501     // Zerobased: Attempt to allocate in the lower 32G,
 502     // but leave room for the compressed class pointer space, which is allocated above
 503     // the heap.
 504     char *zerobased_max = (char *)OopEncodingHeapMax;
 505     // For small heaps, save some space for compressed class pointer
 506     // space so it can be decoded with no base.
 507     if (UseCompressedClassPointers && !UseSharedSpaces &&
 508         OopEncodingHeapMax <= KlassEncodingMetaspaceMax) {
 509       const size_t class_space = align_size_up(CompressedClassSpaceSize, alignment);
 510       zerobased_max = (char *)OopEncodingHeapMax - class_space;
 511     }
 512 
 513     // Give it several tries from top of range to bottom.
 514     if (aligned_heap_base_min_address + size <= zerobased_max &&    // Zerobased theoretically possible.
 515         ((_base == NULL) ||                        // No previous try succeeded.
 516          (_base + size > zerobased_max))) {        // Unscaled delivered an arbitrary address.
 517 
 518       // Calculate the address range within which we try to attach (range of possible start addresses).
 519       char *const highest_start = (char *)align_ptr_down(zerobased_max - size, attach_point_alignment);
 520       // SS10 and SS12u1 cannot compile "(char *)UnscaledOopHeapMax - size" on solaris sparc 32-bit:
 521       // "Cannot use int to initialize char*." Introduce aux variable.
 522       char *unscaled_end = (char *)UnscaledOopHeapMax;
 523       unscaled_end -= size;
 524       char *lowest_start = (size < UnscaledOopHeapMax) ?
 525         MAX2(unscaled_end, aligned_heap_base_min_address) : aligned_heap_base_min_address;
 526       lowest_start  = (char *)align_ptr_up(lowest_start, attach_point_alignment);
 527       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 528                         aligned_heap_base_min_address, zerobased_max, size, alignment, large);
 529     }
 530 
 531     // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
 532     // implement null checks.
 533     noaccess_prefix = noaccess_prefix_size(alignment);
 534 
 535     // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjoint base mode.
 536     char** addresses = get_attach_addresses_for_disjoint_mode();
 537     int i = 0;
 538     while (addresses[i] &&                                 // End of array not yet reached.
 539            ((_base == NULL) ||                             // No previous try succeeded.
 540             (_base + size >  (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
 541              !Universe::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
 542       char* const attach_point = addresses[i];
 543       assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
 544       try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
 545       i++;
 546     }
 547 
 548     // Last, desperate try without any placement.
 549     if (_base == NULL) {
 550       if (PrintCompressedOopsMode && Verbose) {
 551         tty->print("Trying to allocate at address NULL heap of size " PTR_FORMAT ".\n", (address)size + noaccess_prefix);
 552       }
 553       initialize(size + noaccess_prefix, alignment, large, NULL, false);
 554     }
 555   }
 556 }
 557 
 558 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large) : ReservedSpace() {
 559 
 560   if (size == 0) {
 561     return;
 562   }
 563 
 564   // Heap size should be aligned to alignment, too.
 565   guarantee(is_size_aligned(size, alignment), "set by caller");
 566 
 567   if (UseCompressedOops) {
 568     initialize_compressed_heap(size, alignment, large);
 569     if (_size > size) {
 570       // We allocated the heap with a noaccess prefix.
 571       // It can happen that we get a zerobased/unscaled heap with a noaccess prefix
 572       // if we had to try at an arbitrary address.
 573       establish_noaccess_prefix();
 574     }
 575   } else {
 576     initialize(size, alignment, large, NULL, false);
 577   }
 578 
 579   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
 580          "area must be distinguishable from marks for mark-sweep");
 581   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
 582          "area must be distinguishable from marks for mark-sweep");
 583 
 584   if (base() != NULL) {
 585     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
 586   }
 587 }
 588 
 589 // Reserve space for the code segment.  Same as the Java heap, only we mark this as
 590 // executable.
 591 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
 592                                      size_t rs_align,
 593                                      bool large) :
 594   ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
 595   MemTracker::record_virtual_memory_type((address)base(), mtCode);
 596 }
 597 
 598 // VirtualSpace
 599 
 600 VirtualSpace::VirtualSpace() {
 601   _low_boundary           = NULL;
 602   _high_boundary          = NULL;
 603   _low                    = NULL;
 604   _high                   = NULL;
 605   _lower_high             = NULL;
 606   _middle_high            = NULL;
 607   _upper_high             = NULL;
 608   _lower_high_boundary    = NULL;
 609   _middle_high_boundary   = NULL;
 610   _upper_high_boundary    = NULL;
 611   _lower_alignment        = 0;
 612   _middle_alignment       = 0;
 613   _upper_alignment        = 0;
 614   _special                = false;
 615   _executable             = false;
 616 }
 617 
 618 
 619 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
 620   const size_t max_commit_granularity = os::page_size_for_region(rs.size(), 1);
 621   return initialize_with_granularity(rs, committed_size, max_commit_granularity);
 622 }
 623 
 624 bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
 625   if(!rs.is_reserved()) return false;  // allocation failed.
 626   assert(_low_boundary == NULL, "VirtualSpace already initialized");
 627   assert(max_commit_granularity > 0, "Granularity must be non-zero.");
 628 
 629   _low_boundary  = rs.base();
 630   _high_boundary = low_boundary() + rs.size();
 631 
 632   _low = low_boundary();
 633   _high = low();
 634 
 635   _special = rs.special();
 636   _executable = rs.executable();
 637 
 638   // When a VirtualSpace begins life at a large size, make all future expansion
 639   // and shrinking occur aligned to a granularity of large pages.  This avoids
 640   // fragmentation of physical addresses that inhibits the use of large pages
 641   // by the OS virtual memory system.  Empirically,  we see that with a 4MB
 642   // page size, the only spaces that get handled this way are codecache and
 643   // the heap itself, both of which provide a substantial performance
 644   // boost in many benchmarks when covered by large pages.
 645   //
 646   // No attempt is made to force large page alignment at the very top and
 647   // bottom of the space if they are not aligned so already.
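       // Resulting region layout (a sketch): the lower region runs from
       // low_boundary() up to the first middle_alignment() boundary, the middle
       // region covers the aligned interior, and the upper region runs from the
       // last middle_alignment() boundary up to high_boundary().  The lower and
       // upper regions commit in small pages, the middle region in units of
       // max_commit_granularity.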
 648   _lower_alignment  = os::vm_page_size();
 649   _middle_alignment = max_commit_granularity;
 650   _upper_alignment  = os::vm_page_size();
 651 
 652   // End of each region
 653   _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
 654   _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
 655   _upper_high_boundary = high_boundary();
 656 
 657   // High address of each region
 658   _lower_high = low_boundary();
 659   _middle_high = lower_high_boundary();
 660   _upper_high = middle_high_boundary();
 661 
 662   // commit to initial size
 663   if (committed_size > 0) {
 664     if (!expand_by(committed_size)) {
 665       return false;
 666     }
 667   }
 668   return true;
 669 }
 670 
 671 
 672 VirtualSpace::~VirtualSpace() {
 673   release();
 674 }
 675 
 676 
 677 void VirtualSpace::release() {
 678   // This does not release memory it never reserved.
 679   // Caller must release via rs.release();
 680   _low_boundary           = NULL;
 681   _high_boundary          = NULL;
 682   _low                    = NULL;
 683   _high                   = NULL;
 684   _lower_high             = NULL;
 685   _middle_high            = NULL;
 686   _upper_high             = NULL;
 687   _lower_high_boundary    = NULL;
 688   _middle_high_boundary   = NULL;
 689   _upper_high_boundary    = NULL;
 690   _lower_alignment        = 0;
 691   _middle_alignment       = 0;
 692   _upper_alignment        = 0;
 693   _special                = false;
 694   _executable             = false;
 695 }
 696 
 697 
 698 size_t VirtualSpace::committed_size() const {
 699   return pointer_delta(high(), low(), sizeof(char));
 700 }
 701 
 702 
 703 size_t VirtualSpace::reserved_size() const {
 704   return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
 705 }
 706 
 707 
 708 size_t VirtualSpace::uncommitted_size()  const {
 709   return reserved_size() - committed_size();
 710 }
 711 
 712 size_t VirtualSpace::actual_committed_size() const {
 713   // Special VirtualSpaces commit all reserved space up front.
 714   if (special()) {
 715     return reserved_size();
 716   }
 717 
 718   size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
 719   size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
 720   size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));
 721 
 722 #ifdef ASSERT
 723   size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
 724   size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
 725   size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));
 726 
 727   if (committed_high > 0) {
 728     assert(committed_low == lower, "Must be");
 729     assert(committed_middle == middle, "Must be");
 730   }
 731 
 732   if (committed_middle > 0) {
 733     assert(committed_low == lower, "Must be");
 734   }
 735   if (committed_middle < middle) {
 736     assert(committed_high == 0, "Must be");
 737   }
 738 
 739   if (committed_low < lower) {
 740     assert(committed_high == 0, "Must be");
 741     assert(committed_middle == 0, "Must be");
 742   }
 743 #endif
 744 
 745   return committed_low + committed_middle + committed_high;
 746 }
 747 
 748 
 749 bool VirtualSpace::contains(const void* p) const {
 750   return low() <= (const char*) p && (const char*) p < high();
 751 }
 752 
 753 /*
 754    First we need to determine if a particular virtual space is using large
 755    pages.  This is done in the initialize function, and only virtual spaces
 756    that are larger than LargePageSizeInBytes use large pages.  Once we
 757    have determined this, all expand_by and shrink_by calls must grow and
 758    shrink by large page size chunks.  If a particular request
 759    is within the current large page, the call to commit and uncommit memory
 760    can be ignored.  In the case that the low and high boundaries of this
 761    space are not large page aligned, the pages leading to the first large
 762    page address and the pages after the last large page address must be
 763    allocated with default pages.
 764 */
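     // Example (a sketch): with 4K small pages and a 2M commit granularity, a
     // space whose boundaries are not 2M aligned commits its leading partial 2M
     // chunk with 4K pages (lower region), the 2M-aligned interior with 2M pages
     // (middle region), and the trailing partial chunk with 4K pages (upper region).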
 765 bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
 766   if (uncommitted_size() < bytes) return false;
 767 
 768   if (special()) {
 769     // don't commit memory if the entire space is pinned in memory
 770     _high += bytes;
 771     return true;
 772   }
 773 
 774   char* previous_high = high();
 775   char* unaligned_new_high = high() + bytes;
 776   assert(unaligned_new_high <= high_boundary(),
 777          "cannot expand by more than upper boundary");
 778 
 779   // Calculate where the new high for each of the regions should be.  If
 780   // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
 781   // then the unaligned lower and upper new highs would be the
 782   // lower_high() and upper_high() respectively.
 783   char* unaligned_lower_new_high =
 784     MIN2(unaligned_new_high, lower_high_boundary());
 785   char* unaligned_middle_new_high =
 786     MIN2(unaligned_new_high, middle_high_boundary());
 787   char* unaligned_upper_new_high =
 788     MIN2(unaligned_new_high, upper_high_boundary());
 789 
 790   // Align the new highs based on the region's alignment.  lower and upper
 791   // alignment will always be default page size.  middle alignment will be
 792   // LargePageSizeInBytes if the actual size of the virtual space is in
 793   // fact larger than LargePageSizeInBytes.
 794   char* aligned_lower_new_high =
 795     (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
 796   char* aligned_middle_new_high =
 797     (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
 798   char* aligned_upper_new_high =
 799     (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
 800 
 801   // Determine which regions need to grow in this expand_by call.
 802   // If you are growing in the lower region, high() must be in that
 803   // region so calculate the size based on high().  For the middle and
 804   // upper regions, determine the starting point of growth based on the
 805   // location of high().  By getting the MAX of the region's low address
 806   // (or the previous region's high address) and high(), we can tell if it
 807   // is an intra or inter region growth.
 808   size_t lower_needs = 0;
 809   if (aligned_lower_new_high > lower_high()) {
 810     lower_needs =
 811       pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
 812   }
 813   size_t middle_needs = 0;
 814   if (aligned_middle_new_high > middle_high()) {
 815     middle_needs =
 816       pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
 817   }
 818   size_t upper_needs = 0;
 819   if (aligned_upper_new_high > upper_high()) {
 820     upper_needs =
 821       pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
 822   }
 823 
 824   // Check contiguity.
 825   assert(low_boundary() <= lower_high() &&
 826          lower_high() <= lower_high_boundary(),
 827          "high address must be contained within the region");
 828   assert(lower_high_boundary() <= middle_high() &&
 829          middle_high() <= middle_high_boundary(),
 830          "high address must be contained within the region");
 831   assert(middle_high_boundary() <= upper_high() &&
 832          upper_high() <= upper_high_boundary(),
 833          "high address must be contained within the region");
 834 
 835   // Commit regions
 836   if (lower_needs > 0) {
 837     assert(low_boundary() <= lower_high() &&
 838            lower_high() + lower_needs <= lower_high_boundary(),
 839            "must not expand beyond region");
 840     if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
 841       debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
 842                          ", lower_needs=" SIZE_FORMAT ", %d) failed",
 843                          lower_high(), lower_needs, _executable);)
 844       return false;
 845     } else {
 846       _lower_high += lower_needs;
 847     }
 848   }
 849   if (middle_needs > 0) {
 850     assert(lower_high_boundary() <= middle_high() &&
 851            middle_high() + middle_needs <= middle_high_boundary(),
 852            "must not expand beyond region");
 853     if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
 854                            _executable)) {
 855       debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
 856                          ", middle_needs=" SIZE_FORMAT ", " SIZE_FORMAT
 857                          ", %d) failed", middle_high(), middle_needs,
 858                          middle_alignment(), _executable);)
 859       return false;
 860     }
 861     _middle_high += middle_needs;
 862   }
 863   if (upper_needs > 0) {
 864     assert(middle_high_boundary() <= upper_high() &&
 865            upper_high() + upper_needs <= upper_high_boundary(),
 866            "must not expand beyond region");
 867     if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
 868       debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
 869                          ", upper_needs=" SIZE_FORMAT ", %d) failed",
 870                          upper_high(), upper_needs, _executable);)
 871       return false;
 872     } else {
 873       _upper_high += upper_needs;
 874     }
 875   }
 876 
 877   if (pre_touch || AlwaysPreTouch) {
 878     int vm_ps = os::vm_page_size();
 879     for (char* curr = previous_high;
 880          curr < unaligned_new_high;
 881          curr += vm_ps) {
 882       // Note the use of a write here; originally we tried just a read, but
 883       // since the value read was unused, the optimizer removed the read.
 884       // If we ever have a concurrent touchahead thread, we'll want to use
 885       // a read, to avoid the potential of overwriting data (if a mutator
 886       // thread beats the touchahead thread to a page).  There are various
 887       // ways of making sure this read is not optimized away: for example,
 888       // generating the code for a read procedure at runtime.
 889       *curr = 0;
 890     }
 891   }
 892 
 893   _high += bytes;
 894   return true;
 895 }
 896 
 897 // A page is uncommitted if the contents of the entire page are deemed unusable.
 898 // Continue to decrement the high() pointer until it reaches a page boundary,
 899 // at which point that particular page can be uncommitted.
 900 void VirtualSpace::shrink_by(size_t size) {
 901   if (committed_size() < size)
 902     fatal("Cannot shrink virtual space to negative size");
 903 
 904   if (special()) {
 905     // don't uncommit if the entire space is pinned in memory
 906     _high -= size;
 907     return;
 908   }
 909 
 910   char* unaligned_new_high = high() - size;
 911   assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");
 912 
 913   // Calculate new unaligned address
 914   char* unaligned_upper_new_high =
 915     MAX2(unaligned_new_high, middle_high_boundary());
 916   char* unaligned_middle_new_high =
 917     MAX2(unaligned_new_high, lower_high_boundary());
 918   char* unaligned_lower_new_high =
 919     MAX2(unaligned_new_high, low_boundary());
 920 
 921   // Align address to region's alignment
 922   char* aligned_upper_new_high =
 923     (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
 924   char* aligned_middle_new_high =
 925     (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
 926   char* aligned_lower_new_high =
 927     (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
 928 
 929   // Determine which regions need to shrink
 930   size_t upper_needs = 0;
 931   if (aligned_upper_new_high < upper_high()) {
 932     upper_needs =
 933       pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
 934   }
 935   size_t middle_needs = 0;
 936   if (aligned_middle_new_high < middle_high()) {
 937     middle_needs =
 938       pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
 939   }
 940   size_t lower_needs = 0;
 941   if (aligned_lower_new_high < lower_high()) {
 942     lower_needs =
 943       pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
 944   }
 945 
 946   // Check contiguity.
 947   assert(middle_high_boundary() <= upper_high() &&
 948          upper_high() <= upper_high_boundary(),
 949          "high address must be contained within the region");
 950   assert(lower_high_boundary() <= middle_high() &&
 951          middle_high() <= middle_high_boundary(),
 952          "high address must be contained within the region");
 953   assert(low_boundary() <= lower_high() &&
 954          lower_high() <= lower_high_boundary(),
 955          "high address must be contained within the region");
 956 
 957   // Uncommit
 958   if (upper_needs > 0) {
 959     assert(middle_high_boundary() <= aligned_upper_new_high &&
 960            aligned_upper_new_high + upper_needs <= upper_high_boundary(),
 961            "must not shrink beyond region");
 962     if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
 963       debug_only(warning("os::uncommit_memory failed"));
 964       return;
 965     } else {
 966       _upper_high -= upper_needs;
 967     }
 968   }
 969   if (middle_needs > 0) {
 970     assert(lower_high_boundary() <= aligned_middle_new_high &&
 971            aligned_middle_new_high + middle_needs <= middle_high_boundary(),
 972            "must not shrink beyond region");
 973     if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
 974       debug_only(warning("os::uncommit_memory failed"));
 975       return;
 976     } else {
 977       _middle_high -= middle_needs;
 978     }
 979   }
 980   if (lower_needs > 0) {
 981     assert(low_boundary() <= aligned_lower_new_high &&
 982            aligned_lower_new_high + lower_needs <= lower_high_boundary(),
 983            "must not shrink beyond region");
 984     if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
 985       debug_only(warning("os::uncommit_memory failed"));
 986       return;
 987     } else {
 988       _lower_high -= lower_needs;
 989     }
 990   }
 991 
 992   _high -= size;
 993 }
 994 
 995 #ifndef PRODUCT
 996 void VirtualSpace::check_for_contiguity() {
 997   // Check contiguity.
 998   assert(low_boundary() <= lower_high() &&
 999          lower_high() <= lower_high_boundary(),
1000          "high address must be contained within the region");
1001   assert(lower_high_boundary() <= middle_high() &&
1002          middle_high() <= middle_high_boundary(),
1003          "high address must be contained within the region");
1004   assert(middle_high_boundary() <= upper_high() &&
1005          upper_high() <= upper_high_boundary(),
1006          "high address must be contained within the region");
1007   assert(low() >= low_boundary(), "low");
1008   assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
1009   assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
1010   assert(high() <= upper_high(), "upper high");
1011 }
1012 
1013 void VirtualSpace::print_on(outputStream* out) {
1014   out->print   ("Virtual space:");
1015   if (special()) out->print(" (pinned in memory)");
1016   out->cr();
1017   out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
1018   out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
1019   out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
1020   out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
1021 }
1022 
1023 void VirtualSpace::print() {
1024   print_on(tty);
1025 }
1026 
1027 /////////////// Unit tests ///////////////
1028 
1029 #ifndef PRODUCT
1030 
1031 #define test_log(...) \
1032   do {\
1033     if (VerboseInternalVMTests) { \
1034       tty->print_cr(__VA_ARGS__); \
1035       tty->flush(); \
1036     }\
1037   } while (false)
1038 
1039 class TestReservedSpace : AllStatic {
1040  public:
1041   static void small_page_write(void* addr, size_t size) {
1042     size_t page_size = os::vm_page_size();
1043 
1044     char* end = (char*)addr + size;
1045     for (char* p = (char*)addr; p < end; p += page_size) {
1046       *p = 1;
1047     }
1048   }
1049 
1050   static void release_memory_for_test(ReservedSpace rs) {
1051     if (rs.special()) {
1052       guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
1053     } else {
1054       guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
1055     }
1056   }
1057 
1058   static void test_reserved_space1(size_t size, size_t alignment) {
1059     test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
1060 
1061     assert(is_size_aligned(size, alignment), "Incorrect input parameters");
1062 
1063     ReservedSpace rs(size,          // size
1064                      alignment,     // alignment
1065                      UseLargePages, // large
1066                      (char *)NULL); // requested_address
1067 
1068     test_log(" rs.special() == %d", rs.special());
1069 
1070     assert(rs.base() != NULL, "Must be");
1071     assert(rs.size() == size, "Must be");
1072 
1073     assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
1074     assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
1075 
1076     if (rs.special()) {
1077       small_page_write(rs.base(), size);
1078     }
1079 
1080     release_memory_for_test(rs);
1081   }
1082 
1083   static void test_reserved_space2(size_t size) {
1084     test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
1085 
1086     assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1087 
1088     ReservedSpace rs(size);
1089 
1090     test_log(" rs.special() == %d", rs.special());
1091 
1092     assert(rs.base() != NULL, "Must be");
1093     assert(rs.size() == size, "Must be");
1094 
1095     if (rs.special()) {
1096       small_page_write(rs.base(), size);
1097     }
1098 
1099     release_memory_for_test(rs);
1100   }
1101 
1102   static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
1103     test_log("test_reserved_space3(%p, %p, %d)",
1104         (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);
1105 
1106     assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1107     assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");
1108 
1109     bool large = maybe_large && UseLargePages && size >= os::large_page_size();
1110 
1111     ReservedSpace rs(size, alignment, large, false);
1112 
1113     test_log(" rs.special() == %d", rs.special());
1114 
1115     assert(rs.base() != NULL, "Must be");
1116     assert(rs.size() == size, "Must be");
1117 
1118     if (rs.special()) {
1119       small_page_write(rs.base(), size);
1120     }
1121 
1122     release_memory_for_test(rs);
1123   }
1124 
1125 
1126   static void test_reserved_space1() {
1127     size_t size = 2 * 1024 * 1024;
1128     size_t ag   = os::vm_allocation_granularity();
1129 
1130     test_reserved_space1(size,      ag);
1131     test_reserved_space1(size * 2,  ag);
1132     test_reserved_space1(size * 10, ag);
1133   }
1134 
1135   static void test_reserved_space2() {
1136     size_t size = 2 * 1024 * 1024;
1137     size_t ag = os::vm_allocation_granularity();
1138 
1139     test_reserved_space2(size * 1);
1140     test_reserved_space2(size * 2);
1141     test_reserved_space2(size * 10);
1142     test_reserved_space2(ag);
1143     test_reserved_space2(size - ag);
1144     test_reserved_space2(size);
1145     test_reserved_space2(size + ag);
1146     test_reserved_space2(size * 2);
1147     test_reserved_space2(size * 2 - ag);
1148     test_reserved_space2(size * 2 + ag);
1149     test_reserved_space2(size * 3);
1150     test_reserved_space2(size * 3 - ag);
1151     test_reserved_space2(size * 3 + ag);
1152     test_reserved_space2(size * 10);
1153     test_reserved_space2(size * 10 + size / 2);
1154   }
1155 
1156   static void test_reserved_space3() {
1157     size_t ag = os::vm_allocation_granularity();
1158 
1159     test_reserved_space3(ag,      ag    , false);
1160     test_reserved_space3(ag * 2,  ag    , false);
1161     test_reserved_space3(ag * 3,  ag    , false);
1162     test_reserved_space3(ag * 2,  ag * 2, false);
1163     test_reserved_space3(ag * 4,  ag * 2, false);
1164     test_reserved_space3(ag * 8,  ag * 2, false);
1165     test_reserved_space3(ag * 4,  ag * 4, false);
1166     test_reserved_space3(ag * 8,  ag * 4, false);
1167     test_reserved_space3(ag * 16, ag * 4, false);
1168 
1169     if (UseLargePages) {
1170       size_t lp = os::large_page_size();
1171 
1172       // Without large pages
1173       test_reserved_space3(lp,     ag * 4, false);
1174       test_reserved_space3(lp * 2, ag * 4, false);
1175       test_reserved_space3(lp * 4, ag * 4, false);
1176       test_reserved_space3(lp,     lp    , false);
1177       test_reserved_space3(lp * 2, lp    , false);
1178       test_reserved_space3(lp * 3, lp    , false);
1179       test_reserved_space3(lp * 2, lp * 2, false);
1180       test_reserved_space3(lp * 4, lp * 2, false);
1181       test_reserved_space3(lp * 8, lp * 2, false);
1182 
1183       // With large pages
1184       test_reserved_space3(lp, ag * 4    , true);
1185       test_reserved_space3(lp * 2, ag * 4, true);
1186       test_reserved_space3(lp * 4, ag * 4, true);
1187       test_reserved_space3(lp, lp        , true);
1188       test_reserved_space3(lp * 2, lp    , true);
1189       test_reserved_space3(lp * 3, lp    , true);
1190       test_reserved_space3(lp * 2, lp * 2, true);
1191       test_reserved_space3(lp * 4, lp * 2, true);
1192       test_reserved_space3(lp * 8, lp * 2, true);
1193     }
1194   }
1195 
1196   static void test_reserved_space() {
1197     test_reserved_space1();
1198     test_reserved_space2();
1199     test_reserved_space3();
1200   }
1201 };
1202 
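     // This entry point is expected to be driven by the internal VM test runner
     // (e.g. a debug build run with -XX:+ExecuteInternalVMTests); per-test
     // logging is gated by VerboseInternalVMTests via the test_log macro above.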
1203 void TestReservedSpace_test() {
1204   TestReservedSpace::test_reserved_space();
1205 }
1206 
1207 #define assert_equals(actual, expected)     \
1208   assert(actual == expected,                \
1209     err_msg("Got " SIZE_FORMAT " expected " \
1210       SIZE_FORMAT, actual, expected));
1211 
1212 #define assert_ge(value1, value2)                  \
1213   assert(value1 >= value2,                         \
1214     err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
1215       #value2 "': " SIZE_FORMAT, value1, value2));
1216 
1217 #define assert_lt(value1, value2)                  \
1218   assert(value1 < value2,                          \
1219     err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
1220       #value2 "': " SIZE_FORMAT, value1, value2));
1221 
1222 
1223 class TestVirtualSpace : AllStatic {
1224   enum TestLargePages {
1225     Default,
1226     Disable,
1227     Reserve,
1228     Commit
1229   };
1230 
1231   static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
1232     switch(mode) {
1233     default:
1234     case Default:
1235     case Reserve:
1236       return ReservedSpace(reserve_size_aligned);
1237     case Disable:
1238     case Commit:
1239       return ReservedSpace(reserve_size_aligned,
1240                            os::vm_allocation_granularity(),
1241                            /* large */ false, /* exec */ false);
1242     }
1243   }
1244 
1245   static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
1246     switch(mode) {
1247     default:
1248     case Default:
1249     case Reserve:
1250       return vs.initialize(rs, 0);
1251     case Disable:
1252       return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
1253     case Commit:
1254       return vs.initialize_with_granularity(rs, 0, os::page_size_for_region(rs.size(), 1));
1255     }
1256   }
1257 
1258  public:
1259   static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
1260                                                         TestLargePages mode = Default) {
1261     size_t granularity = os::vm_allocation_granularity();
1262     size_t reserve_size_aligned = align_size_up(reserve_size, granularity);
1263 
1264     ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
1265 
1266     assert(reserved.is_reserved(), "Must be");
1267 
1268     VirtualSpace vs;
1269     bool initialized = initialize_virtual_space(vs, reserved, mode);
1270     assert(initialized, "Failed to initialize VirtualSpace");
1271 
1272     vs.expand_by(commit_size, false);
1273 
1274     if (vs.special()) {
1275       assert_equals(vs.actual_committed_size(), reserve_size_aligned);
1276     } else {
1277       assert_ge(vs.actual_committed_size(), commit_size);
1278       // Approximate the commit granularity.
1279       // Make sure that we don't commit using large pages
1280       // if large pages have been disabled for this VirtualSpace.
1281       size_t commit_granularity = (mode == Disable || !UseLargePages) ?
1282                                    os::vm_page_size() : os::large_page_size();
1283       assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
1284     }
1285 
1286     reserved.release();
1287   }
1288 
1289   static void test_virtual_space_actual_committed_space_one_large_page() {
1290     if (!UseLargePages) {
1291       return;
1292     }
1293 
1294     size_t large_page_size = os::large_page_size();
1295 
1296     ReservedSpace reserved(large_page_size, large_page_size, true, false);
1297 
1298     assert(reserved.is_reserved(), "Must be");
1299 
1300     VirtualSpace vs;
1301     bool initialized = vs.initialize(reserved, 0);
1302     assert(initialized, "Failed to initialize VirtualSpace");
1303 
1304     vs.expand_by(large_page_size, false);
1305 
1306     assert_equals(vs.actual_committed_size(), large_page_size);
1307 
1308     reserved.release();
1309   }
1310 
1311   static void test_virtual_space_actual_committed_space() {
1312     test_virtual_space_actual_committed_space(4 * K, 0);
1313     test_virtual_space_actual_committed_space(4 * K, 4 * K);
1314     test_virtual_space_actual_committed_space(8 * K, 0);
1315     test_virtual_space_actual_committed_space(8 * K, 4 * K);
1316     test_virtual_space_actual_committed_space(8 * K, 8 * K);
1317     test_virtual_space_actual_committed_space(12 * K, 0);
1318     test_virtual_space_actual_committed_space(12 * K, 4 * K);
1319     test_virtual_space_actual_committed_space(12 * K, 8 * K);
1320     test_virtual_space_actual_committed_space(12 * K, 12 * K);
1321     test_virtual_space_actual_committed_space(64 * K, 0);
1322     test_virtual_space_actual_committed_space(64 * K, 32 * K);
1323     test_virtual_space_actual_committed_space(64 * K, 64 * K);
1324     test_virtual_space_actual_committed_space(2 * M, 0);
1325     test_virtual_space_actual_committed_space(2 * M, 4 * K);
1326     test_virtual_space_actual_committed_space(2 * M, 64 * K);
1327     test_virtual_space_actual_committed_space(2 * M, 1 * M);
1328     test_virtual_space_actual_committed_space(2 * M, 2 * M);
1329     test_virtual_space_actual_committed_space(10 * M, 0);
1330     test_virtual_space_actual_committed_space(10 * M, 4 * K);
1331     test_virtual_space_actual_committed_space(10 * M, 8 * K);
1332     test_virtual_space_actual_committed_space(10 * M, 1 * M);
1333     test_virtual_space_actual_committed_space(10 * M, 2 * M);
1334     test_virtual_space_actual_committed_space(10 * M, 5 * M);
1335     test_virtual_space_actual_committed_space(10 * M, 10 * M);
1336   }
1337 
1338   static void test_virtual_space_disable_large_pages() {
1339     if (!UseLargePages) {
1340       return;
1341     }
1342     // These test cases verify that commits use small pages when VirtualSpace is forced to disable large pages.
1343     test_virtual_space_actual_committed_space(10 * M, 0, Disable);
1344     test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
1345     test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
1346     test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
1347     test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
1348     test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
1349     test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);
1350 
1351     test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
1352     test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
1353     test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
1354     test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
1355     test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
1356     test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
1357     test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);
1358 
1359     test_virtual_space_actual_committed_space(10 * M, 0, Commit);
1360     test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
1361     test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
1362     test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
1363     test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
1364     test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
1365     test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
1366   }
1367 
1368   static void test_virtual_space() {
1369     test_virtual_space_actual_committed_space();
1370     test_virtual_space_actual_committed_space_one_large_page();
1371     test_virtual_space_disable_large_pages();
1372   }
1373 };
1374 
1375 void TestVirtualSpace_test() {
1376   TestVirtualSpace::test_virtual_space();
1377 }
1378 
1379 #endif // PRODUCT
1380 
1381 #endif