1 /*
   2  * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "oops/markOop.hpp"
  27 #include "oops/oop.inline.hpp"
  28 #include "runtime/virtualspace.hpp"
  29 #include "services/memTracker.hpp"
  30 
  31 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
  32 
  33 // ReservedSpace
  34 
  35 // Dummy constructor
  36 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
  37     _alignment(0), _special(false), _executable(false) {
  38 }
  39 
  40 ReservedSpace::ReservedSpace(size_t size) {
  41   // Want to use large pages where possible and pad with small pages.
  42   size_t page_size = os::page_size_for_region_unaligned(size, 1);
  43   bool large_pages = page_size != (size_t)os::vm_page_size();
  44   // Don't force the alignment to be large page aligned,
  45   // since that will waste memory.
  46   size_t alignment = os::vm_allocation_granularity();
  47   initialize(size, alignment, large_pages, NULL, false);
  48 }
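     // Illustrative example (not part of the code above; assuming 4K small pages
     // and 2M large pages): for size == 10M, os::page_size_for_region_unaligned()
     // picks the 2M page size, so large_pages is true, while alignment stays at
     // os::vm_allocation_granularity(), so the ends of the region may be padded
     // with small pages instead of being rounded up to a large-page boundary.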
  49 
  50 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  51                              bool large,
  52                              char* requested_address) {
  53   initialize(size, alignment, large, requested_address, false);
  54 }
  55 
  56 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  57                              bool large,
  58                              bool executable) {
  59   initialize(size, alignment, large, NULL, executable);
  60 }
  61 
  62 // Helper method. Returns true (and releases the memory) if a requested address was not obtained.
  63 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
  64                                            const size_t size, bool special)
  65 {
  66   if (base == requested_address || requested_address == NULL)
  67     return false; // did not fail
  68 
  69   if (base != NULL) {
  70     // A different reserve address may be acceptable in other cases,
  71     // but for compressed oops the heap should be at the requested address.
  72     assert(UseCompressedOops, "currently requested address used only for compressed oops");
  73     if (PrintCompressedOopsMode) {
  74       tty->cr();
  75       tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
  76     }
  77     // OS ignored requested address. Try different address.
  78     if (special) {
  79       if (!os::release_memory_special(base, size)) {
  80         fatal("os::release_memory_special failed");
  81       }
  82     } else {
  83       if (!os::release_memory(base, size)) {
  84         fatal("os::release_memory failed");
  85       }
  86     }
  87   }
  88   return true;
  89 }
  90 
  91 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
  92                                char* requested_address,
  93                                bool executable) {
  94   const size_t granularity = os::vm_allocation_granularity();
  95   assert((size & (granularity - 1)) == 0,
  96          "size not aligned to os::vm_allocation_granularity()");
  97   assert((alignment & (granularity - 1)) == 0,
  98          "alignment not aligned to os::vm_allocation_granularity()");
  99   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 100          "not a power of 2");
 101 
 102   alignment = MAX2(alignment, (size_t)os::vm_page_size());
 103 
 104   _base = NULL;
 105   _size = 0;
 106   _special = false;
 107   _executable = executable;
 108   _alignment = 0;
 109   _noaccess_prefix = 0;
 110   if (size == 0) {
 111     return;
 112   }
 113 
 114   // If the OS doesn't support demand paging for large page memory, we need
 115   // to use reserve_memory_special() to reserve and pin the entire region.
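       // (Illustrative note: on Linux, for example, the pinned-up-front path is
       // used for UseHugeTLBFS/UseSHM large pages, while transparent huge pages
       // can be committed on demand and take the regular path below; the exact
       // decision is made by os::can_commit_large_page_memory().)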
 116   bool special = large && !os::can_commit_large_page_memory();
 117   char* base = NULL;
 118 
 119   if (special) {
 120 
 121     base = os::reserve_memory_special(size, alignment, requested_address, executable);
 122 
 123     if (base != NULL) {
 124       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
 125         // OS ignored requested address. Try different address.
 126         return;
 127       }
 128       // Check alignment constraints.
 129       assert((uintptr_t) base % alignment == 0,
 130              err_msg("Large pages returned a non-aligned address, base: "
 131                  PTR_FORMAT " alignment: " PTR_FORMAT,
 132                  base, (void*)(uintptr_t)alignment));
 133       _special = true;
 134     } else {
 135       // failed; try to reserve regular memory below
 136       if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 137                             !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 138         if (PrintCompressedOopsMode) {
 139           tty->cr();
 140           tty->print_cr("Reserve regular memory without large pages.");
 141         }
 142       }
 143     }
 144   }
 145 
 146   if (base == NULL) {
 147     // Optimistically assume that the OS returns an aligned base pointer.
 148     // When reserving a large address range, most OSes seem to align to at
 149     // least 64K.
 150 
 151     // If the memory was requested at a particular address, use
 152     // os::attempt_reserve_memory_at() to avoid mapping over something
 153     // important.  If available space is not detected, return NULL.
 154 
 155     if (requested_address != 0) {
 156       base = os::attempt_reserve_memory_at(size, requested_address);
 157       if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
 158         // OS ignored requested address. Try different address.
 159         base = NULL;
 160       }
 161     } else {
 162       base = os::reserve_memory(size, NULL, alignment);
 163     }
 164 
 165     if (base == NULL) return;
 166 
 167     // Check alignment constraints
 168     if ((((size_t)base) & (alignment - 1)) != 0) {
 169       // Base not aligned, retry
 170       if (!os::release_memory(base, size)) fatal("os::release_memory failed");
 171       // Make sure that size is aligned
 172       size = align_size_up(size, alignment);
 173       base = os::reserve_memory_aligned(size, alignment);
 174 
 175       if (requested_address != 0 &&
 176           failed_to_reserve_as_requested(base, requested_address, size, false)) {
 177         // As a result of the alignment constraints, the allocated base differs
 178         // from the requested address. Return to the caller, who can
 179         // take remedial action (like try again without a requested address).
 180         assert(_base == NULL, "should be");
 181         return;
 182       }
 183     }
 184   }
 185   // Done
 186   _base = base;
 187   _size = size;
 188   _alignment = alignment;
 189 }
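     // Illustrative note on the alignment retry above (example values, not code
     // from this file): for a power-of-two alignment, align_size_up() behaves like
     //   aligned = (value + alignment - 1) & ~(alignment - 1)
     // e.g. align_size_up(0x12345, 0x10000) == 0x20000.  If the first reservation
     // comes back unaligned, it is released and os::reserve_memory_aligned() is
     // asked for the aligned-up size instead.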
 190 
 191 
 192 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
 193                              bool special, bool executable) {
 194   assert((size % os::vm_allocation_granularity()) == 0,
 195          "size not allocation aligned");
 196   _base = base;
 197   _size = size;
 198   _alignment = alignment;
 199   _noaccess_prefix = 0;
 200   _special = special;
 201   _executable = executable;
 202 }
 203 
 204 
 205 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
 206                                         bool split, bool realloc) {
 207   assert(partition_size <= size(), "partition failed");
 208   if (split) {
 209     os::split_reserved_memory(base(), size(), partition_size, realloc);
 210   }
 211   ReservedSpace result(base(), partition_size, alignment, special(),
 212                        executable());
 213   return result;
 214 }
 215 
 216 
 217 ReservedSpace
 218 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
 219   assert(partition_size <= size(), "partition failed");
 220   ReservedSpace result(base() + partition_size, size() - partition_size,
 221                        alignment, special(), executable());
 222   return result;
 223 }
 224 
 225 
 226 size_t ReservedSpace::page_align_size_up(size_t size) {
 227   return align_size_up(size, os::vm_page_size());
 228 }
 229 
 230 
 231 size_t ReservedSpace::page_align_size_down(size_t size) {
 232   return align_size_down(size, os::vm_page_size());
 233 }
 234 
 235 
 236 size_t ReservedSpace::allocation_align_size_up(size_t size) {
 237   return align_size_up(size, os::vm_allocation_granularity());
 238 }
 239 
 240 
 241 size_t ReservedSpace::allocation_align_size_down(size_t size) {
 242   return align_size_down(size, os::vm_allocation_granularity());
 243 }
 244 
 245 
 246 void ReservedSpace::release() {
 247   if (is_reserved()) {
 248     char *real_base = _base - _noaccess_prefix;
 249     const size_t real_size = _size + _noaccess_prefix;
 250     if (special()) {
 251       os::release_memory_special(real_base, real_size);
 252     } else {
 253       os::release_memory(real_base, real_size);
 254     }
 255     _base = NULL;
 256     _size = 0;
 257     _noaccess_prefix = 0;
 258     _alignment = 0;
 259     _special = false;
 260     _executable = false;
 261   }
 262 }
 263 
 264 static size_t noaccess_prefix_size(size_t alignment) {
 265   return lcm(os::vm_page_size(), alignment);
 266 }
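     // Illustrative example (assumed values): with a 4K page size and an 8M heap
     // alignment the prefix is lcm(4K, 8M) == 8M, i.e. exactly one alignment unit.
     // The lcm only grows beyond the alignment if the page size does not divide it
     // (e.g. a 64K page with a 96K alignment would give a 192K prefix).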
 267 
 268 void ReservedHeapSpace::establish_noaccess_prefix() {
 269   assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
 270   _noaccess_prefix = noaccess_prefix_size(_alignment);
 271 
 272   if (base() && base() + _size > (char *)OopEncodingHeapMax) {
 273     if (true
 274         WIN64_ONLY(&& !UseLargePages)
 275         AIX_ONLY(&& os::vm_page_size() != SIZE_64K)) {
 276       // Protect memory at the base of the allocated region.
 277       // If special, the page was committed (only matters on Windows).
 278       if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
 279         fatal("cannot protect protection page");
 280       }
 281       if (PrintCompressedOopsMode) {
 282         tty->cr();
 283         tty->print_cr("Protected page at the reserved heap base: "
 284                       PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
 285       }
 286       assert(Universe::narrow_oop_use_implicit_null_checks() == true, "not initialized?");
 287     } else {
 288       Universe::set_narrow_oop_use_implicit_null_checks(false);
 289     }
 290   }
 291 
 292   _base += _noaccess_prefix;
 293   _size -= _noaccess_prefix;
 294   assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
 295 }
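     // Illustrative sketch of the effect (example values, not taken from the code
     // above): if the space was reserved at address A with size S + P, where
     // P == noaccess_prefix_size(_alignment), then after this call
     //   _base == A + P  and  _size == S,
     // and [A, A + P) is protected with MEM_PROT_NONE, so decoding a narrow oop of
     // 0 against the heap base faults instead of touching valid memory, which is
     // what allows implicit null checks to keep working.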
 296 
 297 // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
 298 // Does not check whether the reserved memory actually is at requested_address, as the memory returned
 299 // might still fulfill the wishes of the caller.
 300 // Assures the memory is aligned to 'alignment'.
 301 // NOTE: If ReservedHeapSpace already points to some reserved memory, that memory is freed first.
 302 void ReservedHeapSpace::try_reserve_heap(size_t size,
 303                                          size_t alignment,
 304                                          bool large,
 305                                          char* requested_address) {
 306   if (_base != NULL) {
 307     // We tried before, but we didn't like the address delivered.
 308     release();
 309   }
 310 
 311   // If the OS doesn't support demand paging for large page memory, we need
 312   // to use reserve_memory_special() to reserve and pin the entire region.
 313   bool special = large && !os::can_commit_large_page_memory();
 314   char* base = NULL;
 315 
 316   if (PrintCompressedOopsMode && Verbose) {
 317     tty->print("Trying to allocate at address " PTR_FORMAT " heap of size " PTR_FORMAT ".\n",
 318                requested_address, (address)size);
 319   }
 320 
 321   if (special) {
 322     base = os::reserve_memory_special(size, alignment, requested_address, false);
 323 
 324     if (base != NULL) {
 325       // Check alignment constraints.
 326       assert((uintptr_t) base % alignment == 0,
 327              err_msg("Large pages returned a non-aligned address, base: "
 328                      PTR_FORMAT " alignment: " PTR_FORMAT,
 329                      base, (void*)(uintptr_t)alignment));
 330       _special = true;
 331     }
 332   }
 333 
 334   if (base == NULL) {
 335     // Failed; try to reserve regular memory below
 336     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 337                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 338       if (PrintCompressedOopsMode) {
 339         tty->cr();
 340         tty->print_cr("Reserve regular memory without large pages.");
 341       }
 342     }
 343 
 344     // Optimistically assume that the OS returns an aligned base pointer.
 345     // When reserving a large address range, most OSes seem to align to at
 346     // least 64K.
 347 
 348     // If the memory was requested at a particular address, use
 349     // os::attempt_reserve_memory_at() to avoid mapping over something
 350     // important.  If available space is not detected, return NULL.
 351 
 352     if (requested_address != 0) {
 353       base = os::attempt_reserve_memory_at(size, requested_address);
 354     } else {
 355       base = os::reserve_memory(size, NULL, alignment);
 356     }
 357   }
 358   if (base == NULL) { return; }
 359 
 360   // Done
 361   _base = base;
 362   _size = size;
 363   _alignment = alignment;
 364 
 365   // Check alignment constraints
 366   if ((((size_t)base) & (alignment - 1)) != 0) {
 367     // Base not aligned, retry.
 368     release();
 369   }
 370 }
 371 
 372 void ReservedHeapSpace::try_reserve_range(char *highest_start,
 373                                           char *lowest_start,
 374                                           size_t attach_point_alignment,
 375                                           char *aligned_heap_base_min_address,
 376                                           char *upper_bound,
 377                                           size_t size,
 378                                           size_t alignment,
 379                                           bool large) {
 380   const size_t attach_range = highest_start - lowest_start;
 381   // Cap the number of attempts at what is actually possible.
 382   // At least one attempt is possible even for a zero-sized attach range.
 383   const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
 384   const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);
 385 
 386   const size_t stepsize = (attach_range == 0) ? // Only one try.
 387     (size_t) highest_start : align_size_up(attach_range / num_attempts_to_try, attach_point_alignment);
 388 
 389   // Try attach points from top to bottom.
 390   char* attach_point = highest_start;
 391   while (attach_point >= lowest_start  &&
 392          attach_point <= highest_start &&  // Avoid wrap around.
 393          ((_base == NULL) ||
 394           (_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
 395     try_reserve_heap(size, alignment, large, attach_point);
 396     attach_point -= stepsize;
 397   }
 398 }
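     // Illustrative walk-through (example numbers only): with HeapSearchSteps == 3,
     // highest_start == 28G, lowest_start == 4G and a 256M attach_point_alignment,
     // attach_range is 24G, stepsize becomes align_size_up(24G / 3, 256M) == 8G,
     // and attach points are tried at 28G, 20G, 12G and 4G until one of them
     // yields a base inside [aligned_heap_base_min_address, upper_bound - size].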
 399 
 400 #define SIZE_64K  ((uint64_t) UCONST64(      0x10000))
 401 #define SIZE_256M ((uint64_t) UCONST64(   0x10000000))
 402 #define SIZE_32G  ((uint64_t) UCONST64(  0x800000000))
 403 
 404 // Helper for heap allocation. Returns an array of (OS-specific) addresses
 405 // that are suited for disjoint base mode. The array is
 406 // NULL-terminated.
 407 static char** get_attach_addresses_for_disjoint_mode() {
 408   static uint64_t addresses[] = {
 409      2 * SIZE_32G,
 410      3 * SIZE_32G,
 411      4 * SIZE_32G,
 412      8 * SIZE_32G,
 413     10 * SIZE_32G,
 414      1 * SIZE_64K * SIZE_32G,
 415      2 * SIZE_64K * SIZE_32G,
 416      3 * SIZE_64K * SIZE_32G,
 417      4 * SIZE_64K * SIZE_32G,
 418     16 * SIZE_64K * SIZE_32G,
 419     32 * SIZE_64K * SIZE_32G,
 420     34 * SIZE_64K * SIZE_32G,
 421     0
 422   };
 423 
 424   // Skip addresses below OopEncodingHeapMax or HeapBaseMinAddress. This assumes
 425   // the array is sorted in ascending order.
 426   uint i = 0;
 427   while (addresses[i] != 0 &&
 428          (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
 429     i++;
 430   }
 431   uint start = i;
 432 
 433   // Avoid more steps than requested.
 434   i = 0;
 435   while (addresses[start+i] != 0) {
 436     if (i == HeapSearchSteps) {
 437       addresses[start+i] = 0;
 438       break;
 439     }
 440     i++;
 441   }
 442 
 443   return (char**) &addresses[start];
 444 }
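     // Illustrative example (assuming default flag values, i.e. OopEncodingHeapMax
     // == 32G and HeapSearchSteps == 3): no entry is below 32G, so nothing is
     // skipped, and the truncation loop leaves {64G, 96G, 128G, NULL} as the
     // candidate attach points for disjoint base mode.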
 445 
 446 void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, bool large) {
 447   guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
 448             "cannot allocate compressed oop heap for this size");
 449   guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
 450   assert(HeapBaseMinAddress > 0, "sanity");
 451 
 452   const size_t granularity = os::vm_allocation_granularity();
 453   assert((size & (granularity - 1)) == 0,
 454          "size not aligned to os::vm_allocation_granularity()");
 455   assert((alignment & (granularity - 1)) == 0,
 456          "alignment not aligned to os::vm_allocation_granularity()");
 457   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 458          "not a power of 2");
 459 
 460   // The necessary attach point alignment for generated wish addresses.
 461   // This is needed to increase the chance of attaching for mmap and shmat.
 462   const size_t os_attach_point_alignment =
 463     AIX_ONLY(SIZE_256M)  // Known shm boundary alignment.
 464     NOT_AIX(os::vm_allocation_granularity());
 465   const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
 466 
 467   char *aligned_heap_base_min_address = (char *)align_ptr_up((void *)HeapBaseMinAddress, alignment);
 468   size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
 469     noaccess_prefix_size(alignment) : 0;
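       // (Illustrative example with assumed values HeapBaseMinAddress == 2G and
       // OopEncodingHeapMax == 32G: a 16G heap still ends below 32G when placed at
       // 2G, so no noaccess prefix is needed; a 31G heap would end at 33G, so the
       // prefix is requested up front.)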
 470 
 471   // Attempt to allocate at the user-given address.
 472   if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
 473     try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_heap_base_min_address);
 474     if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
 475       release();
 476     }
 477   }
 478 
 479   // Keep heap at HeapBaseMinAddress.
 480   if (_base == NULL) {
 481 
 482     // Try to allocate the heap at addresses that allow efficient oop compression.
 483     // Different schemes are tried, in order of decreasing optimization potential.
 484     //
 485     // For this, try_reserve_heap() is called with the desired heap base addresses.
 486     // A call into the os layer to allocate at a given address can return memory
 487     // at a different address than requested.  Still, this might be memory at a useful
 488     // address. try_reserve_heap() always keeps such memory, since the criteria
 489     // for a good heap are checked only here.
 490 
 491     // Attempt to allocate so that we can run without base and scale (32-bit unscaled compressed oops).
 492     // Give it several tries from top of range to bottom.
 493     if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {
 494 
 495       // Calculate the address range within which we try to attach (range of possible start addresses).
 496       char* const highest_start = (char *)align_ptr_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
 497       char* const lowest_start  = (char *)align_ptr_up(aligned_heap_base_min_address, attach_point_alignment);
 498       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 499                         aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
 500     }
 501 
 502     // zerobased: Attempt to allocate in the lower 32G.
 503     // But leave room for the compressed class pointer space, which is allocated
 504     // above the heap.
 505     char *zerobased_max = (char *)OopEncodingHeapMax;
 506     // For small heaps, save some space for compressed class pointer
 507     // space so it can be decoded with no base.
 508     if (UseCompressedClassPointers && !UseSharedSpaces &&
 509         OopEncodingHeapMax <= KlassEncodingMetaspaceMax) {
 510       const size_t class_space = align_size_up(CompressedClassSpaceSize, alignment);
 511       zerobased_max = (char *)OopEncodingHeapMax - class_space;
 512     }
 513 
 514     // Give it several tries from top of range to bottom.
 515     if (aligned_heap_base_min_address + size <= zerobased_max &&    // Zerobased is theoretically possible.
 516         ((_base == NULL) ||                        // No previous try succeeded.
 517          (_base + size > zerobased_max))) {        // Unscaled delivered an arbitrary address.
 518 
 519       // Calculate the address range within which we try to attach (range of possible start addresses).
 520       char *const highest_start = (char *)align_ptr_down(zerobased_max - size, attach_point_alignment);
 521       // SS10 and SS12u1 cannot compile "(char *)UnscaledOopHeapMax - size" on solaris sparc 32-bit:
 522       // "Cannot use int to initialize char*." Introduce aux variable.
 523       char *unscaled_end = (char *)UnscaledOopHeapMax;
 524       unscaled_end -= size;
 525       char *lowest_start = (size < UnscaledOopHeapMax) ?
 526         MAX2(unscaled_end, aligned_heap_base_min_address) : aligned_heap_base_min_address;
 527       lowest_start  = (char *)align_ptr_up(lowest_start, attach_point_alignment);
 528       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 529                         aligned_heap_base_min_address, zerobased_max, size, alignment, large);
 530     }
 531 
 532     // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
 533     // implement null checks.
 534     noaccess_prefix = noaccess_prefix_size(alignment);
 535 
 536     // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjoint base mode.
 537     char** addresses = get_attach_addresses_for_disjoint_mode();
 538     int i = 0;
 539     while (addresses[i] &&                                 // End of array not yet reached.
 540            ((_base == NULL) ||                             // No previous try succeeded.
 541             (_base + size >  (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
 542              !Universe::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
 543       char* const attach_point = addresses[i];
 544       assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
 545       try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
 546       i++;
 547     }
 548 
 549     // Last, desperate try without any placement.
 550     if (_base == NULL) {
 551       if (PrintCompressedOopsMode && Verbose) {
 552         tty->print("Trying to allocate at address NULL heap of size " PTR_FORMAT ".\n", (address)size + noaccess_prefix);
 553       }
 554       initialize(size + noaccess_prefix, alignment, large, NULL, false);
 555     }
 556   }
 557 }
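     // Summary of the placement schemes tried above, with illustrative outcomes
     // (the actual result depends on what the OS delivers):
     //   - unscaled:  heap ends below 4G, narrow oops are used as-is
     //                (no base, no shift);
     //   - zerobased: heap ends below OopEncodingHeapMax (32G by default),
     //                decoding needs only a shift;
     //   - disjoint:  heap base is aligned to OopEncodingHeapMax, so the base bits
     //                and the shifted-oop bits do not overlap;
     //   - otherwise: a regular base+shift heap with a noaccess prefix.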
 558 
 559 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large) : ReservedSpace() {
 560 
 561   if (size == 0) {
 562     return;
 563   }
 564 
 565   // Heap size should be aligned to alignment, too.
 566   guarantee(is_size_aligned(size, alignment), "set by caller");
 567 
 568   if (UseCompressedOops) {
 569     initialize_compressed_heap(size, alignment, large);
 570     if (_size > size) {
 571       // We allocated the heap with a noaccess prefix.
 572       // It can happen that we get a zerobased/unscaled heap with a noaccess prefix
 573       // if we had to try at an arbitrary address.
 574       establish_noaccess_prefix();
 575     }
 576   } else {
 577     initialize(size, alignment, large, NULL, false);
 578   }
 579 
 580   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
 581          "area must be distinguishable from marks for mark-sweep");
 582   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
 583          "area must be distinguishable from marks for mark-sweep");
 584 
 585   if (base() > 0) {
 586     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
 587   }
 588 }
 589 
 590 // Reserve space for the code segment.  Same as the Java heap, except we mark
 591 // this as executable.
 592 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
 593                                      size_t rs_align,
 594                                      bool large) :
 595   ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
 596   MemTracker::record_virtual_memory_type((address)base(), mtCode);
 597 }
 598 
 599 // VirtualSpace
 600 
 601 VirtualSpace::VirtualSpace() {
 602   _low_boundary           = NULL;
 603   _high_boundary          = NULL;
 604   _low                    = NULL;
 605   _high                   = NULL;
 606   _lower_high             = NULL;
 607   _middle_high            = NULL;
 608   _upper_high             = NULL;
 609   _lower_high_boundary    = NULL;
 610   _middle_high_boundary   = NULL;
 611   _upper_high_boundary    = NULL;
 612   _lower_alignment        = 0;
 613   _middle_alignment       = 0;
 614   _upper_alignment        = 0;
 615   _special                = false;
 616   _executable             = false;
 617 }
 618 
 619 
 620 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
 621   const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
 622   return initialize_with_granularity(rs, committed_size, max_commit_granularity);
 623 }
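     // A minimal usage sketch (mirrors the unit tests at the end of this file;
     // the sizes are example values):
     //
     //   ReservedSpace rs(1 * M);         // reserve address space only
     //   VirtualSpace vs;
     //   if (vs.initialize(rs, 0)) {      // attach, nothing committed yet
     //     vs.expand_by(64 * K, false);   // commit the first 64K
     //     vs.shrink_by(64 * K);          // and uncommit it again
     //   }
     //   rs.release();                    // the ReservedSpace owns the mapping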
 624 
 625 bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
 626   if (!rs.is_reserved()) return false;  // Allocation failed.
 627   assert(_low_boundary == NULL, "VirtualSpace already initialized");
 628   assert(max_commit_granularity > 0, "Granularity must be non-zero.");
 629 
 630   _low_boundary  = rs.base();
 631   _high_boundary = low_boundary() + rs.size();
 632 
 633   _low = low_boundary();
 634   _high = low();
 635 
 636   _special = rs.special();
 637   _executable = rs.executable();
 638 
 639   // When a VirtualSpace begins life at a large size, make all future expansion
 640   // and shrinking occur aligned to a granularity of large pages.  This avoids
 641   // fragmentation of physical addresses that inhibits the use of large pages
 642   // by the OS virtual memory system.  Empirically, we see that with a 4MB
 643   // page size, the only spaces that get handled this way are codecache and
 644   // the heap itself, both of which provide a substantial performance
 645   // boost in many benchmarks when covered by large pages.
 646   //
 647   // No attempt is made to force large page alignment at the very top and
 648   // bottom of the space if they are not aligned so already.
 649   _lower_alignment  = os::vm_page_size();
 650   _middle_alignment = max_commit_granularity;
 651   _upper_alignment  = os::vm_page_size();
 652 
 653   // End of each region
 654   _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
 655   _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
 656   _upper_high_boundary = high_boundary();
 657 
 658   // High address of each region
 659   _lower_high = low_boundary();
 660   _middle_high = lower_high_boundary();
 661   _upper_high = middle_high_boundary();
 662 
 663   // commit to initial size
 664   if (committed_size > 0) {
 665     if (!expand_by(committed_size)) {
 666       return false;
 667     }
 668   }
 669   return true;
 670 }
 671 
 672 
 673 VirtualSpace::~VirtualSpace() {
 674   release();
 675 }
 676 
 677 
 678 void VirtualSpace::release() {
 679   // This does not release memory, because the VirtualSpace never reserved it.
 680   // The caller must release it via rs.release().
 681   _low_boundary           = NULL;
 682   _high_boundary          = NULL;
 683   _low                    = NULL;
 684   _high                   = NULL;
 685   _lower_high             = NULL;
 686   _middle_high            = NULL;
 687   _upper_high             = NULL;
 688   _lower_high_boundary    = NULL;
 689   _middle_high_boundary   = NULL;
 690   _upper_high_boundary    = NULL;
 691   _lower_alignment        = 0;
 692   _middle_alignment       = 0;
 693   _upper_alignment        = 0;
 694   _special                = false;
 695   _executable             = false;
 696 }
 697 
 698 
 699 size_t VirtualSpace::committed_size() const {
 700   return pointer_delta(high(), low(), sizeof(char));
 701 }
 702 
 703 
 704 size_t VirtualSpace::reserved_size() const {
 705   return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
 706 }
 707 
 708 
 709 size_t VirtualSpace::uncommitted_size()  const {
 710   return reserved_size() - committed_size();
 711 }
 712 
 713 size_t VirtualSpace::actual_committed_size() const {
 714   // Special VirtualSpaces commit all reserved space up front.
 715   if (special()) {
 716     return reserved_size();
 717   }
 718 
 719   size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
 720   size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
 721   size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));
 722 
 723 #ifdef ASSERT
 724   size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
 725   size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
 726   size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));
 727 
 728   if (committed_high > 0) {
 729     assert(committed_low == lower, "Must be");
 730     assert(committed_middle == middle, "Must be");
 731   }
 732 
 733   if (committed_middle > 0) {
 734     assert(committed_low == lower, "Must be");
 735   }
 736   if (committed_middle < middle) {
 737     assert(committed_high == 0, "Must be");
 738   }
 739 
 740   if (committed_low < lower) {
 741     assert(committed_high == 0, "Must be");
 742     assert(committed_middle == 0, "Must be");
 743   }
 744 #endif
 745 
 746   return committed_low + committed_middle + committed_high;
 747 }
 748 
 749 
 750 bool VirtualSpace::contains(const void* p) const {
 751   return low() <= (const char*) p && (const char*) p < high();
 752 }
 753 
 754 /*
 755    First we need to determine if a particular virtual space is using large
 756    pages.  This is done in the initialize function, and only virtual spaces
 757    that are larger than LargePageSizeInBytes use large pages.  Once we
 758    have determined this, all expand_by and shrink_by calls must grow and
 759    shrink by large page size chunks.  If a particular request
 760    is within the current large page, the call to commit and uncommit memory
 761    can be ignored.  In the case that the low and high boundaries of this
 762    space are not large page aligned, the pages leading to the first large
 763    page address and the pages after the last large page address must be
 764    allocated with default pages.
 765 */
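     // Illustrative layout (example values, assuming 4K small pages and a 2M commit
     // granularity): for a space reserved at B with B % 2M == 1M and a size of 10M,
     //   lower  region: [B,        B + 1M )  committed with small pages,
     //   middle region: [B + 1M,   B + 9M )  committed with 2M pages,
     //   upper  region: [B + 9M,   B + 10M)  committed with small pages.
     // An expand_by(4M) then commits 1M in the lower region and 4M in the middle
     // region (the new high rounded up to the 2M granularity), leaving high() at
     // B + 4M and actual_committed_size() at 5M.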
 766 bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
 767   if (uncommitted_size() < bytes) return false;
 768 
 769   if (special()) {
 770     // don't commit memory if the entire space is pinned in memory
 771     _high += bytes;
 772     return true;
 773   }
 774 
 775   char* previous_high = high();
 776   char* unaligned_new_high = high() + bytes;
 777   assert(unaligned_new_high <= high_boundary(),
 778          "cannot expand by more than upper boundary");
 779 
 780   // Calculate where the new high for each of the regions should be.  If
 781   // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
 782   // then the unaligned lower and upper new highs would be the
 783   // lower_high() and upper_high() respectively.
 784   char* unaligned_lower_new_high =
 785     MIN2(unaligned_new_high, lower_high_boundary());
 786   char* unaligned_middle_new_high =
 787     MIN2(unaligned_new_high, middle_high_boundary());
 788   char* unaligned_upper_new_high =
 789     MIN2(unaligned_new_high, upper_high_boundary());
 790 
 791   // Align the new highs based on each region's alignment.  The lower and upper
 792   // alignments will always be the default page size.  The middle alignment will be
 793   // LargePageSizeInBytes if the actual size of the virtual space is in
 794   // fact larger than LargePageSizeInBytes.
 795   char* aligned_lower_new_high =
 796     (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
 797   char* aligned_middle_new_high =
 798     (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
 799   char* aligned_upper_new_high =
 800     (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
 801 
 802   // Determine which regions need to grow in this expand_by call.
 803   // If you are growing in the lower region, high() must be in that
 804   // region so calculate the size based on high().  For the middle and
 805   // upper regions, determine the starting point of growth based on the
 806   // location of high().  By getting the MAX of the region's low address
 807   // (or the previous region's high address) and high(), we can tell if it
 808   // is an intra or inter region growth.
 809   size_t lower_needs = 0;
 810   if (aligned_lower_new_high > lower_high()) {
 811     lower_needs =
 812       pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
 813   }
 814   size_t middle_needs = 0;
 815   if (aligned_middle_new_high > middle_high()) {
 816     middle_needs =
 817       pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
 818   }
 819   size_t upper_needs = 0;
 820   if (aligned_upper_new_high > upper_high()) {
 821     upper_needs =
 822       pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
 823   }
 824 
 825   // Check contiguity.
 826   assert(low_boundary() <= lower_high() &&
 827          lower_high() <= lower_high_boundary(),
 828          "high address must be contained within the region");
 829   assert(lower_high_boundary() <= middle_high() &&
 830          middle_high() <= middle_high_boundary(),
 831          "high address must be contained within the region");
 832   assert(middle_high_boundary() <= upper_high() &&
 833          upper_high() <= upper_high_boundary(),
 834          "high address must be contained within the region");
 835 
 836   // Commit regions
 837   if (lower_needs > 0) {
 838     assert(low_boundary() <= lower_high() &&
 839            lower_high() + lower_needs <= lower_high_boundary(),
 840            "must not expand beyond region");
 841     if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
 842       debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
 843                          ", lower_needs=" SIZE_FORMAT ", %d) failed",
 844                          lower_high(), lower_needs, _executable);)
 845       return false;
 846     } else {
 847       _lower_high += lower_needs;
 848     }
 849   }
 850   if (middle_needs > 0) {
 851     assert(lower_high_boundary() <= middle_high() &&
 852            middle_high() + middle_needs <= middle_high_boundary(),
 853            "must not expand beyond region");
 854     if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
 855                            _executable)) {
 856       debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
 857                          ", middle_needs=" SIZE_FORMAT ", " SIZE_FORMAT
 858                          ", %d) failed", middle_high(), middle_needs,
 859                          middle_alignment(), _executable);)
 860       return false;
 861     }
 862     _middle_high += middle_needs;
 863   }
 864   if (upper_needs > 0) {
 865     assert(middle_high_boundary() <= upper_high() &&
 866            upper_high() + upper_needs <= upper_high_boundary(),
 867            "must not expand beyond region");
 868     if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
 869       debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
 870                          ", upper_needs=" SIZE_FORMAT ", %d) failed",
 871                          upper_high(), upper_needs, _executable);)
 872       return false;
 873     } else {
 874       _upper_high += upper_needs;
 875     }
 876   }
 877 
 878   if (pre_touch || AlwaysPreTouch) {
 879     os::pretouch_memory(previous_high, unaligned_new_high);
 880   }
 881 
 882   _high += bytes;
 883   return true;
 884 }
 885 
 886 // A page is uncommitted if the contents of the entire page are deemed unusable.
 887 // Continue to decrement the high() pointer until it reaches a page boundary
 888 // in which case that particular page can now be uncommitted.
 889 void VirtualSpace::shrink_by(size_t size) {
 890   if (committed_size() < size)
 891     fatal("Cannot shrink virtual space to negative size");
 892 
 893   if (special()) {
 894     // don't uncommit if the entire space is pinned in memory
 895     _high -= size;
 896     return;
 897   }
 898 
 899   char* unaligned_new_high = high() - size;
 900   assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");
 901 
 902   // Calculate new unaligned address
 903   char* unaligned_upper_new_high =
 904     MAX2(unaligned_new_high, middle_high_boundary());
 905   char* unaligned_middle_new_high =
 906     MAX2(unaligned_new_high, lower_high_boundary());
 907   char* unaligned_lower_new_high =
 908     MAX2(unaligned_new_high, low_boundary());
 909 
 910   // Align address to region's alignment
 911   char* aligned_upper_new_high =
 912     (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
 913   char* aligned_middle_new_high =
 914     (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
 915   char* aligned_lower_new_high =
 916     (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
 917 
 918   // Determine which regions need to shrink
 919   size_t upper_needs = 0;
 920   if (aligned_upper_new_high < upper_high()) {
 921     upper_needs =
 922       pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
 923   }
 924   size_t middle_needs = 0;
 925   if (aligned_middle_new_high < middle_high()) {
 926     middle_needs =
 927       pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
 928   }
 929   size_t lower_needs = 0;
 930   if (aligned_lower_new_high < lower_high()) {
 931     lower_needs =
 932       pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
 933   }
 934 
 935   // Check contiguity.
 936   assert(middle_high_boundary() <= upper_high() &&
 937          upper_high() <= upper_high_boundary(),
 938          "high address must be contained within the region");
 939   assert(lower_high_boundary() <= middle_high() &&
 940          middle_high() <= middle_high_boundary(),
 941          "high address must be contained within the region");
 942   assert(low_boundary() <= lower_high() &&
 943          lower_high() <= lower_high_boundary(),
 944          "high address must be contained within the region");
 945 
 946   // Uncommit
 947   if (upper_needs > 0) {
 948     assert(middle_high_boundary() <= aligned_upper_new_high &&
 949            aligned_upper_new_high + upper_needs <= upper_high_boundary(),
 950            "must not shrink beyond region");
 951     if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
 952       debug_only(warning("os::uncommit_memory failed"));
 953       return;
 954     } else {
 955       _upper_high -= upper_needs;
 956     }
 957   }
 958   if (middle_needs > 0) {
 959     assert(lower_high_boundary() <= aligned_middle_new_high &&
 960            aligned_middle_new_high + middle_needs <= middle_high_boundary(),
 961            "must not shrink beyond region");
 962     if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
 963       debug_only(warning("os::uncommit_memory failed"));
 964       return;
 965     } else {
 966       _middle_high -= middle_needs;
 967     }
 968   }
 969   if (lower_needs > 0) {
 970     assert(low_boundary() <= aligned_lower_new_high &&
 971            aligned_lower_new_high + lower_needs <= lower_high_boundary(),
 972            "must not shrink beyond region");
 973     if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
 974       debug_only(warning("os::uncommit_memory failed"));
 975       return;
 976     } else {
 977       _lower_high -= lower_needs;
 978     }
 979   }
 980 
 981   _high -= size;
 982 }
 983 
 984 #ifndef PRODUCT
 985 void VirtualSpace::check_for_contiguity() {
 986   // Check contiguity.
 987   assert(low_boundary() <= lower_high() &&
 988          lower_high() <= lower_high_boundary(),
 989          "high address must be contained within the region");
 990   assert(lower_high_boundary() <= middle_high() &&
 991          middle_high() <= middle_high_boundary(),
 992          "high address must be contained within the region");
 993   assert(middle_high_boundary() <= upper_high() &&
 994          upper_high() <= upper_high_boundary(),
 995          "high address must be contained within the region");
 996   assert(low() >= low_boundary(), "low");
 997   assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
 998   assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
 999   assert(high() <= upper_high(), "upper high");
1000 }
1001 
1002 void VirtualSpace::print_on(outputStream* out) {
1003   out->print   ("Virtual space:");
1004   if (special()) out->print(" (pinned in memory)");
1005   out->cr();
1006   out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
1007   out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
1008   out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
1009   out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
1010 }
1011 
1012 void VirtualSpace::print() {
1013   print_on(tty);
1014 }
1015 
1016 /////////////// Unit tests ///////////////
1017 
1018 #ifndef PRODUCT
1019 
1020 #define test_log(...) \
1021   do {\
1022     if (VerboseInternalVMTests) { \
1023       tty->print_cr(__VA_ARGS__); \
1024       tty->flush(); \
1025     }\
1026   } while (false)
1027 
1028 class TestReservedSpace : AllStatic {
1029  public:
1030   static void small_page_write(void* addr, size_t size) {
1031     size_t page_size = os::vm_page_size();
1032 
1033     char* end = (char*)addr + size;
1034     for (char* p = (char*)addr; p < end; p += page_size) {
1035       *p = 1;
1036     }
1037   }
1038 
1039   static void release_memory_for_test(ReservedSpace rs) {
1040     if (rs.special()) {
1041       guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
1042     } else {
1043       guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
1044     }
1045   }
1046 
1047   static void test_reserved_space1(size_t size, size_t alignment) {
1048     test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
1049 
1050     assert(is_size_aligned(size, alignment), "Incorrect input parameters");
1051 
1052     ReservedSpace rs(size,          // size
1053                      alignment,     // alignment
1054                      UseLargePages, // large
1055                      (char *)NULL); // requested_address
1056 
1057     test_log(" rs.special() == %d", rs.special());
1058 
1059     assert(rs.base() != NULL, "Must be");
1060     assert(rs.size() == size, "Must be");
1061 
1062     assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
1063     assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
1064 
1065     if (rs.special()) {
1066       small_page_write(rs.base(), size);
1067     }
1068 
1069     release_memory_for_test(rs);
1070   }
1071 
1072   static void test_reserved_space2(size_t size) {
1073     test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
1074 
1075     assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1076 
1077     ReservedSpace rs(size);
1078 
1079     test_log(" rs.special() == %d", rs.special());
1080 
1081     assert(rs.base() != NULL, "Must be");
1082     assert(rs.size() == size, "Must be");
1083 
1084     if (rs.special()) {
1085       small_page_write(rs.base(), size);
1086     }
1087 
1088     release_memory_for_test(rs);
1089   }
1090 
1091   static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
1092     test_log("test_reserved_space3(%p, %p, %d)",
1093         (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);
1094 
1095     assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1096     assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");
1097 
1098     bool large = maybe_large && UseLargePages && size >= os::large_page_size();
1099 
1100     ReservedSpace rs(size, alignment, large, false);
1101 
1102     test_log(" rs.special() == %d", rs.special());
1103 
1104     assert(rs.base() != NULL, "Must be");
1105     assert(rs.size() == size, "Must be");
1106 
1107     if (rs.special()) {
1108       small_page_write(rs.base(), size);
1109     }
1110 
1111     release_memory_for_test(rs);
1112   }
1113 
1114 
1115   static void test_reserved_space1() {
1116     size_t size = 2 * 1024 * 1024;
1117     size_t ag   = os::vm_allocation_granularity();
1118 
1119     test_reserved_space1(size,      ag);
1120     test_reserved_space1(size * 2,  ag);
1121     test_reserved_space1(size * 10, ag);
1122   }
1123 
1124   static void test_reserved_space2() {
1125     size_t size = 2 * 1024 * 1024;
1126     size_t ag = os::vm_allocation_granularity();
1127 
1128     test_reserved_space2(size * 1);
1129     test_reserved_space2(size * 2);
1130     test_reserved_space2(size * 10);
1131     test_reserved_space2(ag);
1132     test_reserved_space2(size - ag);
1133     test_reserved_space2(size);
1134     test_reserved_space2(size + ag);
1135     test_reserved_space2(size * 2);
1136     test_reserved_space2(size * 2 - ag);
1137     test_reserved_space2(size * 2 + ag);
1138     test_reserved_space2(size * 3);
1139     test_reserved_space2(size * 3 - ag);
1140     test_reserved_space2(size * 3 + ag);
1141     test_reserved_space2(size * 10);
1142     test_reserved_space2(size * 10 + size / 2);
1143   }
1144 
1145   static void test_reserved_space3() {
1146     size_t ag = os::vm_allocation_granularity();
1147 
1148     test_reserved_space3(ag,      ag    , false);
1149     test_reserved_space3(ag * 2,  ag    , false);
1150     test_reserved_space3(ag * 3,  ag    , false);
1151     test_reserved_space3(ag * 2,  ag * 2, false);
1152     test_reserved_space3(ag * 4,  ag * 2, false);
1153     test_reserved_space3(ag * 8,  ag * 2, false);
1154     test_reserved_space3(ag * 4,  ag * 4, false);
1155     test_reserved_space3(ag * 8,  ag * 4, false);
1156     test_reserved_space3(ag * 16, ag * 4, false);
1157 
1158     if (UseLargePages) {
1159       size_t lp = os::large_page_size();
1160 
1161       // Without large pages
1162       test_reserved_space3(lp,     ag * 4, false);
1163       test_reserved_space3(lp * 2, ag * 4, false);
1164       test_reserved_space3(lp * 4, ag * 4, false);
1165       test_reserved_space3(lp,     lp    , false);
1166       test_reserved_space3(lp * 2, lp    , false);
1167       test_reserved_space3(lp * 3, lp    , false);
1168       test_reserved_space3(lp * 2, lp * 2, false);
1169       test_reserved_space3(lp * 4, lp * 2, false);
1170       test_reserved_space3(lp * 8, lp * 2, false);
1171 
1172       // With large pages
1173       test_reserved_space3(lp, ag * 4    , true);
1174       test_reserved_space3(lp * 2, ag * 4, true);
1175       test_reserved_space3(lp * 4, ag * 4, true);
1176       test_reserved_space3(lp, lp        , true);
1177       test_reserved_space3(lp * 2, lp    , true);
1178       test_reserved_space3(lp * 3, lp    , true);
1179       test_reserved_space3(lp * 2, lp * 2, true);
1180       test_reserved_space3(lp * 4, lp * 2, true);
1181       test_reserved_space3(lp * 8, lp * 2, true);
1182     }
1183   }
1184 
1185   static void test_reserved_space() {
1186     test_reserved_space1();
1187     test_reserved_space2();
1188     test_reserved_space3();
1189   }
1190 };
1191 
1192 void TestReservedSpace_test() {
1193   TestReservedSpace::test_reserved_space();
1194 }
1195 
1196 #define assert_equals(actual, expected)     \
1197   assert(actual == expected,                \
1198     err_msg("Got " SIZE_FORMAT " expected " \
1199       SIZE_FORMAT, actual, expected));
1200 
1201 #define assert_ge(value1, value2)                  \
1202   assert(value1 >= value2,                         \
1203     err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
1204       #value2 "': " SIZE_FORMAT, value1, value2));
1205 
1206 #define assert_lt(value1, value2)                  \
1207   assert(value1 < value2,                          \
1208     err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
1209       #value2 "': " SIZE_FORMAT, value1, value2));
1210 
1211 
1212 class TestVirtualSpace : AllStatic {
1213   enum TestLargePages {
1214     Default,
1215     Disable,
1216     Reserve,
1217     Commit
1218   };
1219 
1220   static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
1221     switch(mode) {
1222     default:
1223     case Default:
1224     case Reserve:
1225       return ReservedSpace(reserve_size_aligned);
1226     case Disable:
1227     case Commit:
1228       return ReservedSpace(reserve_size_aligned,
1229                            os::vm_allocation_granularity(),
1230                            /* large */ false, /* exec */ false);
1231     }
1232   }
1233 
1234   static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
1235     switch(mode) {
1236     default:
1237     case Default:
1238     case Reserve:
1239       return vs.initialize(rs, 0);
1240     case Disable:
1241       return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
1242     case Commit:
1243       return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
1244     }
1245   }
1246 
1247  public:
1248   static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
1249                                                         TestLargePages mode = Default) {
1250     size_t granularity = os::vm_allocation_granularity();
1251     size_t reserve_size_aligned = align_size_up(reserve_size, granularity);
1252 
1253     ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
1254 
1255     assert(reserved.is_reserved(), "Must be");
1256 
1257     VirtualSpace vs;
1258     bool initialized = initialize_virtual_space(vs, reserved, mode);
1259     assert(initialized, "Failed to initialize VirtualSpace");
1260 
1261     vs.expand_by(commit_size, false);
1262 
1263     if (vs.special()) {
1264       assert_equals(vs.actual_committed_size(), reserve_size_aligned);
1265     } else {
1266       assert_ge(vs.actual_committed_size(), commit_size);
1267       // Approximate the commit granularity.
1268       // Make sure that we don't commit using large pages
1269       // if large pages have been disabled for this VirtualSpace.
1270       size_t commit_granularity = (mode == Disable || !UseLargePages) ?
1271                                    os::vm_page_size() : os::large_page_size();
1272       assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
1273     }
1274 
1275     reserved.release();
1276   }
1277 
1278   static void test_virtual_space_actual_committed_space_one_large_page() {
1279     if (!UseLargePages) {
1280       return;
1281     }
1282 
1283     size_t large_page_size = os::large_page_size();
1284 
1285     ReservedSpace reserved(large_page_size, large_page_size, true, false);
1286 
1287     assert(reserved.is_reserved(), "Must be");
1288 
1289     VirtualSpace vs;
1290     bool initialized = vs.initialize(reserved, 0);
1291     assert(initialized, "Failed to initialize VirtualSpace");
1292 
1293     vs.expand_by(large_page_size, false);
1294 
1295     assert_equals(vs.actual_committed_size(), large_page_size);
1296 
1297     reserved.release();
1298   }
1299 
1300   static void test_virtual_space_actual_committed_space() {
1301     test_virtual_space_actual_committed_space(4 * K, 0);
1302     test_virtual_space_actual_committed_space(4 * K, 4 * K);
1303     test_virtual_space_actual_committed_space(8 * K, 0);
1304     test_virtual_space_actual_committed_space(8 * K, 4 * K);
1305     test_virtual_space_actual_committed_space(8 * K, 8 * K);
1306     test_virtual_space_actual_committed_space(12 * K, 0);
1307     test_virtual_space_actual_committed_space(12 * K, 4 * K);
1308     test_virtual_space_actual_committed_space(12 * K, 8 * K);
1309     test_virtual_space_actual_committed_space(12 * K, 12 * K);
1310     test_virtual_space_actual_committed_space(64 * K, 0);
1311     test_virtual_space_actual_committed_space(64 * K, 32 * K);
1312     test_virtual_space_actual_committed_space(64 * K, 64 * K);
1313     test_virtual_space_actual_committed_space(2 * M, 0);
1314     test_virtual_space_actual_committed_space(2 * M, 4 * K);
1315     test_virtual_space_actual_committed_space(2 * M, 64 * K);
1316     test_virtual_space_actual_committed_space(2 * M, 1 * M);
1317     test_virtual_space_actual_committed_space(2 * M, 2 * M);
1318     test_virtual_space_actual_committed_space(10 * M, 0);
1319     test_virtual_space_actual_committed_space(10 * M, 4 * K);
1320     test_virtual_space_actual_committed_space(10 * M, 8 * K);
1321     test_virtual_space_actual_committed_space(10 * M, 1 * M);
1322     test_virtual_space_actual_committed_space(10 * M, 2 * M);
1323     test_virtual_space_actual_committed_space(10 * M, 5 * M);
1324     test_virtual_space_actual_committed_space(10 * M, 10 * M);
1325   }
1326 
1327   static void test_virtual_space_disable_large_pages() {
1328     if (!UseLargePages) {
1329       return;
1330     }
1331     // These test cases verify that committing is done with small pages when we force VirtualSpace to disable large pages.
1332     test_virtual_space_actual_committed_space(10 * M, 0, Disable);
1333     test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
1334     test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
1335     test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
1336     test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
1337     test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
1338     test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);
1339 
1340     test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
1341     test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
1342     test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
1343     test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
1344     test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
1345     test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
1346     test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);
1347 
1348     test_virtual_space_actual_committed_space(10 * M, 0, Commit);
1349     test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
1350     test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
1351     test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
1352     test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
1353     test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
1354     test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
1355   }
1356 
1357   static void test_virtual_space() {
1358     test_virtual_space_actual_committed_space();
1359     test_virtual_space_actual_committed_space_one_large_page();
1360     test_virtual_space_disable_large_pages();
1361   }
1362 };
1363 
1364 void TestVirtualSpace_test() {
1365   TestVirtualSpace::test_virtual_space();
1366 }
1367 
1368 #endif // PRODUCT
1369 
1370 #endif