1 /*
   2  * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "oops/markOop.hpp"
  27 #include "oops/oop.inline.hpp"
  28 #include "runtime/virtualspace.hpp"
  29 #include "services/memTracker.hpp"
  30 
  31 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
  32 
  33 // ReservedSpace
  34 
  35 // Dummy constructor
  36 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
  37     _alignment(0), _special(false), _executable(false) {
  38 }
  39 
  40 ReservedSpace::ReservedSpace(size_t size) {
  41   // Want to use large pages where possible and pad with small pages.
  42   size_t page_size = os::page_size_for_region_unaligned(size, 1);
  43   bool large_pages = page_size != (size_t)os::vm_page_size();
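  // (os::page_size_for_region_unaligned() picks the largest page size usable
  //  for a region of this size; it falls back to the default small page size
  //  when large pages cannot be used.)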
  44   // Don't force the alignment to be large page aligned,
  45   // since that will waste memory.
  46   size_t alignment = os::vm_allocation_granularity();
  47   initialize(size, alignment, large_pages, NULL, false);
  48 }
  49 
  50 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  51                              bool large,
  52                              char* requested_address) {
  53   initialize(size, alignment, large, requested_address, false);
  54 }
  55 
  56 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  57                              bool large,
  58                              bool executable) {
  59   initialize(size, alignment, large, NULL, executable);
  60 }
  61 
  62 // Helper method: returns true, after releasing any memory obtained, if the reservation did not end up at the requested address.
  63 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
  64                                            const size_t size, bool special)
  65 {
  66   if (base == requested_address || requested_address == NULL)
  67     return false; // did not fail
  68 
  69   if (base != NULL) {
  70     // A different reserve address may be acceptable in other cases,
  71     // but for compressed oops the heap should be at the requested address.
  72     assert(UseCompressedOops, "currently requested address used only for compressed oops");
  73     if (PrintCompressedOopsMode) {
  74       tty->cr();
  75       tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
  76     }
  77     // OS ignored requested address. Try different address.
  78     if (special) {
  79       if (!os::release_memory_special(base, size)) {
  80         fatal("os::release_memory_special failed");
  81       }
  82     } else {
  83       if (!os::release_memory(base, size)) {
  84         fatal("os::release_memory failed");
  85       }
  86     }
  87   }
  88   return true;
  89 }
  90 
  91 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
  92                                char* requested_address,
  93                                bool executable) {
  94   const size_t granularity = os::vm_allocation_granularity();
  95   assert((size & (granularity - 1)) == 0,
  96          "size not aligned to os::vm_allocation_granularity()");
  97   assert((alignment & (granularity - 1)) == 0,
  98          "alignment not aligned to os::vm_allocation_granularity()");
  99   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 100          "not a power of 2");
 101 
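  // Reservations are managed at (at least) page granularity, so the effective
  // alignment is never smaller than the page size.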
 102   alignment = MAX2(alignment, (size_t)os::vm_page_size());
 103 
 104   _base = NULL;
 105   _size = 0;
 106   _special = false;
 107   _executable = executable;
 108   _alignment = 0;
 109   _noaccess_prefix = 0;
 110   if (size == 0) {
 111     return;
 112   }
 113 
 114   // If OS doesn't support demand paging for large page memory, we need
 115   // to use reserve_memory_special() to reserve and pin the entire region.
 116   bool special = large && !os::can_commit_large_page_memory();
 117   char* base = NULL;
 118 
 119   if (special) {
 120 
 121     base = os::reserve_memory_special(size, alignment, requested_address, executable);
 122 
 123     if (base != NULL) {
 124       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
 125         // OS ignored requested address. Try different address.
 126         return;
 127       }
 128       // Check alignment constraints.
 129       assert((uintptr_t) base % alignment == 0,
 130              err_msg("Large pages returned a non-aligned address, base: "
 131                  PTR_FORMAT " alignment: " PTR_FORMAT,
 132                  base, (void*)(uintptr_t)alignment));
 133       _special = true;
 134     } else {
 135       // Failed; try to reserve regular memory below
 136       if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 137                             !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 138         if (PrintCompressedOopsMode) {
 139           tty->cr();
 140           tty->print_cr("Reserve regular memory without large pages.");
 141         }
 142       }
 143     }
 144   }
 145 
 146   if (base == NULL) {
 147     // Optimistically assume that the OS returns an aligned base pointer.
 148     // When reserving a large address range, most OSes seem to align to at
 149     // least 64K.
 150 
 151     // If the memory was requested at a particular address, use
 152     // os::attempt_reserve_memory_at() to avoid mapping over something
 153     // important.  If the space cannot be reserved there, NULL is returned.
 154 
 155     if (requested_address != 0) {
 156       base = os::attempt_reserve_memory_at(size, requested_address);
 157       if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
 158         // OS ignored requested address. Try different address.
 159         base = NULL;
 160       }
 161     } else {
 162       base = os::reserve_memory(size, NULL, alignment);
 163     }
 164 
 165     if (base == NULL) return;
 166 
 167     // Check alignment constraints
 168     if ((((size_t)base) & (alignment - 1)) != 0) {
 169       // Base not aligned, retry
 170       if (!os::release_memory(base, size)) fatal("os::release_memory failed");
 171       // Make sure that size is aligned
 172       size = align_size_up(size, alignment);
 173       base = os::reserve_memory_aligned(size, alignment);
 174 
 175       if (requested_address != 0 &&
 176           failed_to_reserve_as_requested(base, requested_address, size, false)) {
 177         // As a result of the alignment constraints, the allocated base differs
 178         // from the requested address. Return to the caller, who can
 179         // take remedial action (like try again without a requested address).
 180         assert(_base == NULL, "should be");
 181         return;
 182       }
 183     }
 184   }
 185   // Done
 186   _base = base;
 187   _size = size;
 188   _alignment = alignment;
 189 }
 190 
 191 
 192 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
 193                              bool special, bool executable) {
 194   assert((size % os::vm_allocation_granularity()) == 0,
 195          "size not allocation aligned");
 196   _base = base;
 197   _size = size;
 198   _alignment = alignment;
 199   _noaccess_prefix = 0;
 200   _special = special;
 201   _executable = executable;
 202 }
 203 
 204 
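// Carve the first partition_size bytes out of this space as a separate
// ReservedSpace.  If 'split' is true the underlying OS reservation is split as
// well, so that (on platforms that need it) the two parts can later be
// released independently.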
 205 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
 206                                         bool split, bool realloc) {
 207   assert(partition_size <= size(), "partition failed");
 208   if (split) {
 209     os::split_reserved_memory(base(), size(), partition_size, realloc);
 210   }
 211   ReservedSpace result(base(), partition_size, alignment, special(),
 212                        executable());
 213   return result;
 214 }
 215 
 216 
 217 ReservedSpace
 218 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
 219   assert(partition_size <= size(), "partition failed");
 220   ReservedSpace result(base() + partition_size, size() - partition_size,
 221                        alignment, special(), executable());
 222   return result;
 223 }
 224 
 225 
 226 size_t ReservedSpace::page_align_size_up(size_t size) {
 227   return align_size_up(size, os::vm_page_size());
 228 }
 229 
 230 
 231 size_t ReservedSpace::page_align_size_down(size_t size) {
 232   return align_size_down(size, os::vm_page_size());
 233 }
 234 
 235 
 236 size_t ReservedSpace::allocation_align_size_up(size_t size) {
 237   return align_size_up(size, os::vm_allocation_granularity());
 238 }
 239 
 240 
 241 size_t ReservedSpace::allocation_align_size_down(size_t size) {
 242   return align_size_down(size, os::vm_allocation_granularity());
 243 }
 244 
 245 
 246 void ReservedSpace::release() {
 247   if (is_reserved()) {
 248     char *real_base = _base - _noaccess_prefix;
 249     const size_t real_size = _size + _noaccess_prefix;
 250     if (special()) {
 251       os::release_memory_special(real_base, real_size);
 252     } else {
 253       os::release_memory(real_base, real_size);
 254     }
 255     _base = NULL;
 256     _size = 0;
 257     _noaccess_prefix = 0;
 258     _alignment = 0;
 259     _special = false;
 260     _executable = false;
 261   }
 262 }
 263 
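// The noaccess prefix must be a multiple of the page size (so it can be
// protected) and of the heap alignment (so the base is still aligned once the
// prefix is skipped), hence the least common multiple of the two.
// For example, with 4K pages and a 2M heap alignment the prefix is lcm(4K, 2M) = 2M.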
 264 static size_t noaccess_prefix_size(size_t alignment) {
 265   return lcm(os::vm_page_size(), alignment);
 266 }
 267 
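// Establish a protected (no-access) region below the usable heap so that a
// decoded narrow NULL, which equals the narrow oop base, faults on access.
// This preserves implicit null checks when the heap base is not zero.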
 268 void ReservedHeapSpace::establish_noaccess_prefix() {
 269   assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
 270   _noaccess_prefix = noaccess_prefix_size(_alignment);
 271 
 272   if (base() && base() + _size > (char *)OopEncodingHeapMax) {
 273     if (true
 274         WIN64_ONLY(&& !UseLargePages)
 275         AIX_ONLY(&& os::vm_page_size() != SIZE_64K)) {
 276       // Protect memory at the base of the allocated region.
 277       // If special, the page was committed (only matters on windows)
 278       if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
 279         fatal("cannot protect protection page");
 280       }
 281       if (PrintCompressedOopsMode) {
 282         tty->cr();
 283         tty->print_cr("Protected page at the reserved heap base: "
 284                       PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
 285       }
 286       assert(Universe::narrow_oop_use_implicit_null_checks() == true, "not initialized?");
 287     } else {
 288       Universe::set_narrow_oop_use_implicit_null_checks(false);
 289     }
 290   }
 291 
 292   _base += _noaccess_prefix;
 293   _size -= _noaccess_prefix;
 294   assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
 295 }
 296 
 297 // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
 298 // Does not check whether the reserved memory actually is at requested_address, as the memory returned
 299 // might still fulfill the wishes of the caller.
 300 // Assures the memory is aligned to 'alignment'.
 301 // NOTE: If this ReservedHeapSpace already points to some reserved memory, that memory is freed first.
 302 void ReservedHeapSpace::try_reserve_heap(size_t size,
 303                                          size_t alignment,
 304                                          bool large,
 305                                          char* requested_address) {
 306   if (_base != NULL) {
 307     // We tried before, but we didn't like the address delivered.
 308     release();
 309   }
 310 
 311   // If OS doesn't support demand paging for large page memory, we need
 312   // to use reserve_memory_special() to reserve and pin the entire region.
 313   bool special = large && !os::can_commit_large_page_memory();
 314   char* base = NULL;
 315 
 316   if (PrintCompressedOopsMode && Verbose) {
 317     tty->print("Trying to allocate at address " PTR_FORMAT " heap of size " PTR_FORMAT ".\n",
 318                requested_address, (address)size);
 319   }
 320 
 321   if (special) {
 322     base = os::reserve_memory_special(size, alignment, requested_address, false);
 323 
 324     if (base != NULL) {
 325       // Check alignment constraints.
 326       assert((uintptr_t) base % alignment == 0,
 327              err_msg("Large pages returned a non-aligned address, base: "
 328                      PTR_FORMAT " alignment: " PTR_FORMAT,
 329                      base, (void*)(uintptr_t)alignment));
 330       _special = true;
 331     }
 332   }
 333 
 334   if (base == NULL) {
 335     // Failed; try to reserve regular memory below
 336     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 337                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 338       if (PrintCompressedOopsMode) {
 339         tty->cr();
 340         tty->print_cr("Reserve regular memory without large pages.");
 341       }
 342     }
 343 
 344     // Optimistically assume that the OS returns an aligned base pointer.
 345     // When reserving a large address range, most OSes seem to align to at
 346     // least 64K.
 347 
 348     // If the memory was requested at a particular address, use
 349     // os::attempt_reserve_memory_at() to avoid mapping over something
 350     // important.  If the space cannot be reserved there, NULL is returned.
 351 
 352     if (requested_address != 0) {
 353       base = os::attempt_reserve_memory_at(size, requested_address);
 354     } else {
 355       base = os::reserve_memory(size, NULL, alignment);
 356     }
 357   }
 358   if (base == NULL) { return; }
 359 
 360   // Done
 361   _base = base;
 362   _size = size;
 363   _alignment = alignment;
 364 
 365   // Check alignment constraints
 366   if ((((size_t)base) & (alignment - 1)) != 0) {
 367     // Base not aligned, retry.
 368     release();
 369   }
 370 }
 371 
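// Walk attach points from highest_start down to lowest_start, in steps of at
// least attach_point_alignment, until a reservation lands completely within
// [aligned_heap_base_min_address, upper_bound) or the candidates are exhausted.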
 372 void ReservedHeapSpace::try_reserve_range(char *highest_start,
 373                                           char *lowest_start,
 374                                           size_t attach_point_alignment,
 375                                           char *aligned_heap_base_min_address,
 376                                           char *upper_bound,
 377                                           size_t size,
 378                                           size_t alignment,
 379                                           bool large) {
 380   const size_t attach_range = highest_start - lowest_start;
 381   // Cap the number of attempts at the number of possible attach points.
 382   // At least one attempt is possible even for a zero-sized attach range.
 383   const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
 384   const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);
 385 
 386   const size_t stepsize = (attach_range == 0) ? // Only one try.
 387     (size_t) highest_start : align_size_up(attach_range / num_attempts_to_try, attach_point_alignment);
 388 
 389   // Try attach points from top to bottom.
 390   char* attach_point = highest_start;
 391   while (attach_point >= lowest_start  &&
 392          attach_point <= highest_start &&  // Avoid wrap around.
 393          ((_base == NULL) ||
 394           (_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
 395     try_reserve_heap(size, alignment, large, attach_point);
 396     attach_point -= stepsize;
 397   }
 398 }
 399 
 400 #define SIZE_64K  ((uint64_t) UCONST64(      0x10000))
 401 #define SIZE_256M ((uint64_t) UCONST64(   0x10000000))
 402 #define SIZE_32G  ((uint64_t) UCONST64(  0x800000000))
 403 
 404 // Helper for heap allocation. Returns an array with addresses
 405 // (OS-specific) which are suited for disjoint base mode. Array is
 406 // NULL terminated.
 407 static char** get_attach_addresses_for_disjoint_mode() {
 408   static uint64_t addresses[] = {
 409      2 * SIZE_32G,
 410      3 * SIZE_32G,
 411      4 * SIZE_32G,
 412      8 * SIZE_32G,
 413     10 * SIZE_32G,
 414      1 * SIZE_64K * SIZE_32G,
 415      2 * SIZE_64K * SIZE_32G,
 416      3 * SIZE_64K * SIZE_32G,
 417      4 * SIZE_64K * SIZE_32G,
 418     16 * SIZE_64K * SIZE_32G,
 419     32 * SIZE_64K * SIZE_32G,
 420     34 * SIZE_64K * SIZE_32G,
 421     0
 422   };
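  // All entries are multiples of 32G and thus aligned to OopEncodingHeapMax
  // (with the default 3-bit narrow oop shift), so a heap base at one of these
  // addresses shares no bits with any shifted oop offset and can be combined
  // with it by OR instead of ADD on platforms that use disjoint base mode.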
 423 
 424   // Skip addresses below OopEncodingHeapMax or HeapBaseMinAddress. This
 425   // assumes the array is sorted.
 426   uint i = 0;
 427   while (addresses[i] != 0 &&
 428          (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
 429     i++;
 430   }
 431   uint start = i;
 432 
 433   // Avoid more steps than requested.
 434   i = 0;
 435   while (addresses[start+i] != 0) {
 436     if (i == HeapSearchSteps) {
 437       addresses[start+i] = 0;
 438       break;
 439     }
 440     i++;
 441   }
 442 
 443   return (char**) &addresses[start];
 444 }
 445 
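// Reserve a heap placed for efficient compressed oop decoding: first at a
// user-specified HeapBaseMinAddress (if given), then at addresses allowing
// unscaled, zero-based and disjoint-base encoding, and finally at an arbitrary
// address as a last resort.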
 446 void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, bool large) {
 447   guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
 448             "cannot allocate compressed oop heap for this size");
 449   guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
 450   assert(HeapBaseMinAddress > 0, "sanity");
 451 
 452   const size_t granularity = os::vm_allocation_granularity();
 453   assert((size & (granularity - 1)) == 0,
 454          "size not aligned to os::vm_allocation_granularity()");
 455   assert((alignment & (granularity - 1)) == 0,
 456          "alignment not aligned to os::vm_allocation_granularity()");
 457   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 458          "not a power of 2");
 459 
 460   // The necessary attach point alignment for generated wish addresses.
 461   // This is needed to increase the chance of attaching for mmap and shmat.
 462   const size_t os_attach_point_alignment =
 463     AIX_ONLY(SIZE_256M)  // Known shm boundary alignment.
 464     NOT_AIX(os::vm_allocation_granularity());
 465   const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
 466 
 467   char *aligned_heap_base_min_address = (char *)align_ptr_up((void *)HeapBaseMinAddress, alignment);
 468   size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
 469     noaccess_prefix_size(alignment) : 0;
 470 
 471   // Attempt to alloc at user-given address.
 472   if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
 473     try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_heap_base_min_address);
 474     if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
 475       release();
 476     }
 477   }
 478 
 479   // Keep heap at HeapBaseMinAddress.
 480   if (_base == NULL) {
 481 
 482     // Try to allocate the heap at addresses that allow efficient oop compression.
 483     // Different schemes are tried, in order of decreasing optimization potential.
 484     //
 485     // For this, try_reserve_heap() is called with the desired heap base addresses.
 486     // A call into the os layer to allocate at a given address can return memory
 487     // at a different address than requested.  Still, this might be memory at a useful
 488     // address. try_reserve_heap() always keeps such an allocation, because the
 489     // criteria for a good heap are only checked here.
 490 
 491     // Attempt to allocate so that we can run without base and scale (32-bit unscaled compressed oops).
 492     // Give it several tries from top of range to bottom.
 493     if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {
 494 
 495       // Calculate the address range within which we try to attach (range of possible start addresses).
 496       char* const highest_start = (char *)align_ptr_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
 497       char* const lowest_start  = (char *)align_ptr_up(aligned_heap_base_min_address, attach_point_alignment);
 498       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 499                         aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
 500     }
 501 
 502     // Zero-based: Attempt to allocate in the lower 32G.
 503     // But leave room for the compressed class space, which is allocated above
 504     // the heap.
 505     char *zerobased_max = (char *)OopEncodingHeapMax;
 506     const size_t class_space = align_size_up(CompressedClassSpaceSize, alignment);
 507     // For small heaps, save some space for compressed class pointer
 508     // space so it can be decoded with no base.
 509     if (UseCompressedClassPointers && !UseSharedSpaces &&
 510         OopEncodingHeapMax <= KlassEncodingMetaspaceMax &&
 511         (uint64_t)(aligned_heap_base_min_address + size + class_space) <= KlassEncodingMetaspaceMax) {
 512       zerobased_max = (char *)OopEncodingHeapMax - class_space;
 513     }
 514 
 515     // Give it several tries from top of range to bottom.
 516     if (aligned_heap_base_min_address + size <= zerobased_max &&    // Zero-based theoretically possible.
 517         ((_base == NULL) ||                        // No previous try succeeded.
 518          (_base + size > zerobased_max))) {        // Unscaled delivered an arbitrary address.
 519 
 520       // Calculate the address range within which we try to attach (range of possible start addresses).
 521       char *const highest_start = (char *)align_ptr_down(zerobased_max - size, attach_point_alignment);
 522       // SS10 and SS12u1 cannot compile "(char *)UnscaledOopHeapMax - size" on solaris sparc 32-bit:
 523       // "Cannot use int to initialize char*." Introduce aux variable.
 524       char *unscaled_end = (char *)UnscaledOopHeapMax;
 525       unscaled_end -= size;
 526       char *lowest_start = (size < UnscaledOopHeapMax) ?
 527         MAX2(unscaled_end, aligned_heap_base_min_address) : aligned_heap_base_min_address;
 528       lowest_start  = (char *)align_ptr_up(lowest_start, attach_point_alignment);
 529       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 530                         aligned_heap_base_min_address, zerobased_max, size, alignment, large);
 531     }
 532 
 533     // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
 534     // implement null checks.
 535     noaccess_prefix = noaccess_prefix_size(alignment);
 536 
 537     // Try to attach at addresses that are aligned to OopEncodingHeapMax (disjoint base mode).
 538     char** addresses = get_attach_addresses_for_disjoint_mode();
 539     int i = 0;
 540     while (addresses[i] &&                                 // End of array not yet reached.
 541            ((_base == NULL) ||                             // No previous try succeeded.
 542             (_base + size >  (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
 543              !Universe::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
 544       char* const attach_point = addresses[i];
 545       assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
 546       try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
 547       i++;
 548     }
 549 
 550     // Last, desperate try without any placement.
 551     if (_base == NULL) {
 552       if (PrintCompressedOopsMode && Verbose) {
 553         tty->print("Trying to allocate at address NULL heap of size " PTR_FORMAT ".\n", (address)size + noaccess_prefix);
 554       }
 555       initialize(size + noaccess_prefix, alignment, large, NULL, false);
 556     }
 557   }
 558 }
 559 
 560 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large) : ReservedSpace() {
 561 
 562   if (size == 0) {
 563     return;
 564   }
 565 
 566   // Heap size should be aligned to alignment, too.
 567   guarantee(is_size_aligned(size, alignment), "set by caller");
 568 
 569   if (UseCompressedOops) {
 570     initialize_compressed_heap(size, alignment, large);
 571     if (_size > size) {
 572       // We allocated the heap with a noaccess prefix.
 573       // It can happen that we get a zero-based/unscaled heap with a noaccess prefix,
 574       // if we had to try at an arbitrary address.
 575       establish_noaccess_prefix();
 576     }
 577   } else {
 578     initialize(size, alignment, large, NULL, false);
 579   }
 580 
 581   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
 582          "area must be distinguishable from marks for mark-sweep");
 583   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
 584          "area must be distinguishable from marks for mark-sweep");
 585 
 586   if (base() != NULL) {
 587     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
 588   }
 589 }
 590 
 591 // Reserve space for the code segment.  Same as the Java heap, except that we
 592 // mark this space as executable.
 593 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
 594                                      size_t rs_align,
 595                                      bool large) :
 596   ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
 597   MemTracker::record_virtual_memory_type((address)base(), mtCode);
 598 }
 599 
 600 // VirtualSpace
 601 
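// Illustrative sketch of the intended usage (reserve first, commit lazily):
//
//   ReservedSpace rs(1 * M);         // reserve address space only
//   VirtualSpace vs;
//   vs.initialize(rs, 0);            // nothing committed yet
//   vs.expand_by(64 * K, false);     // commit the first 64K on demand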
 602 VirtualSpace::VirtualSpace() {
 603   _low_boundary           = NULL;
 604   _high_boundary          = NULL;
 605   _low                    = NULL;
 606   _high                   = NULL;
 607   _lower_high             = NULL;
 608   _middle_high            = NULL;
 609   _upper_high             = NULL;
 610   _lower_high_boundary    = NULL;
 611   _middle_high_boundary   = NULL;
 612   _upper_high_boundary    = NULL;
 613   _lower_alignment        = 0;
 614   _middle_alignment       = 0;
 615   _upper_alignment        = 0;
 616   _special                = false;
 617   _executable             = false;
 618 }
 619 
 620 
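// Initialize on top of an already reserved space, committing with the largest
// page size (commit granularity) suitable for the reserved region.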
 621 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
 622   const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
 623   return initialize_with_granularity(rs, committed_size, max_commit_granularity);
 624 }
 625 
 626 bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
 627   if (!rs.is_reserved()) return false;  // allocation failed.
 628   assert(_low_boundary == NULL, "VirtualSpace already initialized");
 629   assert(max_commit_granularity > 0, "Granularity must be non-zero.");
 630 
 631   _low_boundary  = rs.base();
 632   _high_boundary = low_boundary() + rs.size();
 633 
 634   _low = low_boundary();
 635   _high = low();
 636 
 637   _special = rs.special();
 638   _executable = rs.executable();
 639 
 640   // When a VirtualSpace begins life at a large size, make all future expansion
 641   // and shrinking occur aligned to a granularity of large pages.  This avoids
 642   // fragmentation of physical addresses that inhibits the use of large pages
 643   // by the OS virtual memory system.  Empirically, we see that with a 4MB
 644   // page size, the only spaces that get handled this way are codecache and
 645   // the heap itself, both of which provide a substantial performance
 646   // boost in many benchmarks when covered by large pages.
 647   //
 648   // No attempt is made to force large page alignment at the very top and
 649   // bottom of the space if they are not aligned so already.
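  //
  // The space is therefore managed as three regions:
  //   [low_boundary,         lower_high_boundary)  committed with small pages
  //   [lower_high_boundary,  middle_high_boundary) committed at max_commit_granularity
  //   [middle_high_boundary, high_boundary)        committed with small pages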
 650   _lower_alignment  = os::vm_page_size();
 651   _middle_alignment = max_commit_granularity;
 652   _upper_alignment  = os::vm_page_size();
 653 
 654   // End of each region
 655   _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
 656   _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
 657   _upper_high_boundary = high_boundary();
 658 
 659   // High address of each region
 660   _lower_high = low_boundary();
 661   _middle_high = lower_high_boundary();
 662   _upper_high = middle_high_boundary();
 663 
 664   // commit to initial size
 665   if (committed_size > 0) {
 666     if (!expand_by(committed_size)) {
 667       return false;
 668     }
 669   }
 670   return true;
 671 }
 672 
 673 
 674 VirtualSpace::~VirtualSpace() {
 675   release();
 676 }
 677 
 678 
 679 void VirtualSpace::release() {
 680   // This does not release the underlying memory, which VirtualSpace never reserved.
 681   // The caller must release it via ReservedSpace::release().
 682   _low_boundary           = NULL;
 683   _high_boundary          = NULL;
 684   _low                    = NULL;
 685   _high                   = NULL;
 686   _lower_high             = NULL;
 687   _middle_high            = NULL;
 688   _upper_high             = NULL;
 689   _lower_high_boundary    = NULL;
 690   _middle_high_boundary   = NULL;
 691   _upper_high_boundary    = NULL;
 692   _lower_alignment        = 0;
 693   _middle_alignment       = 0;
 694   _upper_alignment        = 0;
 695   _special                = false;
 696   _executable             = false;
 697 }
 698 
 699 
 700 size_t VirtualSpace::committed_size() const {
 701   return pointer_delta(high(), low(), sizeof(char));
 702 }
 703 
 704 
 705 size_t VirtualSpace::reserved_size() const {
 706   return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
 707 }
 708 
 709 
 710 size_t VirtualSpace::uncommitted_size()  const {
 711   return reserved_size() - committed_size();
 712 }
 713 
 714 size_t VirtualSpace::actual_committed_size() const {
 715   // Special VirtualSpaces commit all reserved space up front.
 716   if (special()) {
 717     return reserved_size();
 718   }
 719 
 720   size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
 721   size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
 722   size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));
 723 
 724 #ifdef ASSERT
 725   size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
 726   size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
 727   size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));
 728 
 729   if (committed_high > 0) {
 730     assert(committed_low == lower, "Must be");
 731     assert(committed_middle == middle, "Must be");
 732   }
 733 
 734   if (committed_middle > 0) {
 735     assert(committed_low == lower, "Must be");
 736   }
 737   if (committed_middle < middle) {
 738     assert(committed_high == 0, "Must be");
 739   }
 740 
 741   if (committed_low < lower) {
 742     assert(committed_high == 0, "Must be");
 743     assert(committed_middle == 0, "Must be");
 744   }
 745 #endif
 746 
 747   return committed_low + committed_middle + committed_high;
 748 }
 749 
 750 
 751 bool VirtualSpace::contains(const void* p) const {
 752   return low() <= (const char*) p && (const char*) p < high();
 753 }
 754 
 755 /*
 756    First we need to determine if a particular virtual space is using large
 757    pages.  This is done in the initialize function and only virtual spaces
 758    that are larger than LargePageSizeInBytes use large pages.  Once we
 759    have determined this, all expand_by and shrink_by calls must grow and
 760    shrink by large page size chunks.  If a particular request
 761    is within the current large page, the call to commit and uncommit memory
 762    can be ignored.  In the case that the low and high boundaries of this
 763    space are not large page aligned, the pages leading to the first large
 764    page address and the pages after the last large page address must be
 765    allocated with default pages.
 766 */
 767 bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
 768   if (uncommitted_size() < bytes) return false;
 769 
 770   if (special()) {
 771     // don't commit memory if the entire space is pinned in memory
 772     _high += bytes;
 773     return true;
 774   }
 775 
 776   char* previous_high = high();
 777   char* unaligned_new_high = high() + bytes;
 778   assert(unaligned_new_high <= high_boundary(),
 779          "cannot expand by more than upper boundary");
 780 
 781   // Calculate where the new high for each of the regions should be.  If
 782   // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
 783   // then the unaligned lower and upper new highs would be the
 784   // lower_high() and upper_high() respectively.
 785   char* unaligned_lower_new_high =
 786     MIN2(unaligned_new_high, lower_high_boundary());
 787   char* unaligned_middle_new_high =
 788     MIN2(unaligned_new_high, middle_high_boundary());
 789   char* unaligned_upper_new_high =
 790     MIN2(unaligned_new_high, upper_high_boundary());
 791 
 792   // Align the new highs based on each region's alignment.  Lower and upper
 793   // alignment will always be default page size.  middle alignment will be
 794   // LargePageSizeInBytes if the actual size of the virtual space is in
 795   // fact larger than LargePageSizeInBytes.
 796   char* aligned_lower_new_high =
 797     (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
 798   char* aligned_middle_new_high =
 799     (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
 800   char* aligned_upper_new_high =
 801     (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
 802 
 803   // Determine which regions need to grow in this expand_by call.
 804   // If you are growing in the lower region, high() must be in that
 805   // region so calculate the size based on high().  For the middle and
 806   // upper regions, determine the starting point of growth based on the
 807   // location of high().  By getting the MAX of the region's low address
 808   // (or the previous region's high address) and high(), we can tell if it
 809   // is an intra or inter region growth.
 810   size_t lower_needs = 0;
 811   if (aligned_lower_new_high > lower_high()) {
 812     lower_needs =
 813       pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
 814   }
 815   size_t middle_needs = 0;
 816   if (aligned_middle_new_high > middle_high()) {
 817     middle_needs =
 818       pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
 819   }
 820   size_t upper_needs = 0;
 821   if (aligned_upper_new_high > upper_high()) {
 822     upper_needs =
 823       pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
 824   }
 825 
 826   // Check contiguity.
 827   assert(low_boundary() <= lower_high() &&
 828          lower_high() <= lower_high_boundary(),
 829          "high address must be contained within the region");
 830   assert(lower_high_boundary() <= middle_high() &&
 831          middle_high() <= middle_high_boundary(),
 832          "high address must be contained within the region");
 833   assert(middle_high_boundary() <= upper_high() &&
 834          upper_high() <= upper_high_boundary(),
 835          "high address must be contained within the region");
 836 
 837   // Commit regions
 838   if (lower_needs > 0) {
 839     assert(low_boundary() <= lower_high() &&
 840            lower_high() + lower_needs <= lower_high_boundary(),
 841            "must not expand beyond region");
 842     if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
 843       debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
 844                          ", lower_needs=" SIZE_FORMAT ", %d) failed",
 845                          lower_high(), lower_needs, _executable);)
 846       return false;
 847     } else {
 848       _lower_high += lower_needs;
 849     }
 850   }
 851   if (middle_needs > 0) {
 852     assert(lower_high_boundary() <= middle_high() &&
 853            middle_high() + middle_needs <= middle_high_boundary(),
 854            "must not expand beyond region");
 855     if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
 856                            _executable)) {
 857       debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
 858                          ", middle_needs=" SIZE_FORMAT ", " SIZE_FORMAT
 859                          ", %d) failed", middle_high(), middle_needs,
 860                          middle_alignment(), _executable);)
 861       return false;
 862     }
 863     _middle_high += middle_needs;
 864   }
 865   if (upper_needs > 0) {
 866     assert(middle_high_boundary() <= upper_high() &&
 867            upper_high() + upper_needs <= upper_high_boundary(),
 868            "must not expand beyond region");
 869     if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
 870       debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
 871                          ", upper_needs=" SIZE_FORMAT ", %d) failed",
 872                          upper_high(), upper_needs, _executable);)
 873       return false;
 874     } else {
 875       _upper_high += upper_needs;
 876     }
 877   }
 878 
 879   if (pre_touch || AlwaysPreTouch) {
 880     os::pretouch_memory(previous_high, unaligned_new_high);
 881   }
 882 
 883   _high += bytes;
 884   return true;
 885 }
 886 
 887 // A page is uncommitted only if the contents of the entire page are deemed unusable.
 888 // Continue to decrement the high() pointer until it reaches a page boundary,
 889 // at which point that particular page can be uncommitted.
 890 void VirtualSpace::shrink_by(size_t size) {
 891   if (committed_size() < size)
 892     fatal("Cannot shrink virtual space to negative size");
 893 
 894   if (special()) {
 895     // don't uncommit if the entire space is pinned in memory
 896     _high -= size;
 897     return;
 898   }
 899 
 900   char* unaligned_new_high = high() - size;
 901   assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");
 902 
 903   // Calculate new unaligned address
 904   char* unaligned_upper_new_high =
 905     MAX2(unaligned_new_high, middle_high_boundary());
 906   char* unaligned_middle_new_high =
 907     MAX2(unaligned_new_high, lower_high_boundary());
 908   char* unaligned_lower_new_high =
 909     MAX2(unaligned_new_high, low_boundary());
 910 
 911   // Align address to region's alignment
 912   char* aligned_upper_new_high =
 913     (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
 914   char* aligned_middle_new_high =
 915     (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
 916   char* aligned_lower_new_high =
 917     (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
 918 
 919   // Determine which regions need to shrink
 920   size_t upper_needs = 0;
 921   if (aligned_upper_new_high < upper_high()) {
 922     upper_needs =
 923       pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
 924   }
 925   size_t middle_needs = 0;
 926   if (aligned_middle_new_high < middle_high()) {
 927     middle_needs =
 928       pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
 929   }
 930   size_t lower_needs = 0;
 931   if (aligned_lower_new_high < lower_high()) {
 932     lower_needs =
 933       pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
 934   }
 935 
 936   // Check contiguity.
 937   assert(middle_high_boundary() <= upper_high() &&
 938          upper_high() <= upper_high_boundary(),
 939          "high address must be contained within the region");
 940   assert(lower_high_boundary() <= middle_high() &&
 941          middle_high() <= middle_high_boundary(),
 942          "high address must be contained within the region");
 943   assert(low_boundary() <= lower_high() &&
 944          lower_high() <= lower_high_boundary(),
 945          "high address must be contained within the region");
 946 
 947   // Uncommit
 948   if (upper_needs > 0) {
 949     assert(middle_high_boundary() <= aligned_upper_new_high &&
 950            aligned_upper_new_high + upper_needs <= upper_high_boundary(),
 951            "must not shrink beyond region");
 952     if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
 953       debug_only(warning("os::uncommit_memory failed"));
 954       return;
 955     } else {
 956       _upper_high -= upper_needs;
 957     }
 958   }
 959   if (middle_needs > 0) {
 960     assert(lower_high_boundary() <= aligned_middle_new_high &&
 961            aligned_middle_new_high + middle_needs <= middle_high_boundary(),
 962            "must not shrink beyond region");
 963     if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
 964       debug_only(warning("os::uncommit_memory failed"));
 965       return;
 966     } else {
 967       _middle_high -= middle_needs;
 968     }
 969   }
 970   if (lower_needs > 0) {
 971     assert(low_boundary() <= aligned_lower_new_high &&
 972            aligned_lower_new_high + lower_needs <= lower_high_boundary(),
 973            "must not shrink beyond region");
 974     if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
 975       debug_only(warning("os::uncommit_memory failed"));
 976       return;
 977     } else {
 978       _lower_high -= lower_needs;
 979     }
 980   }
 981 
 982   _high -= size;
 983 }
 984 
 985 #ifndef PRODUCT
 986 void VirtualSpace::check_for_contiguity() {
 987   // Check contiguity.
 988   assert(low_boundary() <= lower_high() &&
 989          lower_high() <= lower_high_boundary(),
 990          "high address must be contained within the region");
 991   assert(lower_high_boundary() <= middle_high() &&
 992          middle_high() <= middle_high_boundary(),
 993          "high address must be contained within the region");
 994   assert(middle_high_boundary() <= upper_high() &&
 995          upper_high() <= upper_high_boundary(),
 996          "high address must be contained within the region");
 997   assert(low() >= low_boundary(), "low");
 998   assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
 999   assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
1000   assert(high() <= upper_high(), "upper high");
1001 }
1002 
1003 void VirtualSpace::print_on(outputStream* out) {
1004   out->print   ("Virtual space:");
1005   if (special()) out->print(" (pinned in memory)");
1006   out->cr();
1007   out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
1008   out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
1009   out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
1010   out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
1011 }
1012 
1013 void VirtualSpace::print() {
1014   print_on(tty);
1015 }
1016 
1017 /////////////// Unit tests ///////////////
1018 
1019 #ifndef PRODUCT
1020 
1021 #define test_log(...) \
1022   do {\
1023     if (VerboseInternalVMTests) { \
1024       tty->print_cr(__VA_ARGS__); \
1025       tty->flush(); \
1026     }\
1027   } while (false)
1028 
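// Exercises ReservedSpace with various sizes and alignments.  For special
// (pinned, pre-committed) reservations, small_page_write() touches one byte
// per small page to verify the memory is actually accessible.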
1029 class TestReservedSpace : AllStatic {
1030  public:
1031   static void small_page_write(void* addr, size_t size) {
1032     size_t page_size = os::vm_page_size();
1033 
1034     char* end = (char*)addr + size;
1035     for (char* p = (char*)addr; p < end; p += page_size) {
1036       *p = 1;
1037     }
1038   }
1039 
1040   static void release_memory_for_test(ReservedSpace rs) {
1041     if (rs.special()) {
1042       guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
1043     } else {
1044       guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
1045     }
1046   }
1047 
1048   static void test_reserved_space1(size_t size, size_t alignment) {
1049     test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
1050 
1051     assert(is_size_aligned(size, alignment), "Incorrect input parameters");
1052 
1053     ReservedSpace rs(size,          // size
1054                      alignment,     // alignment
1055                      UseLargePages, // large
1056                      (char *)NULL); // requested_address
1057 
1058     test_log(" rs.special() == %d", rs.special());
1059 
1060     assert(rs.base() != NULL, "Must be");
1061     assert(rs.size() == size, "Must be");
1062 
1063     assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
1064     assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
1065 
1066     if (rs.special()) {
1067       small_page_write(rs.base(), size);
1068     }
1069 
1070     release_memory_for_test(rs);
1071   }
1072 
1073   static void test_reserved_space2(size_t size) {
1074     test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
1075 
1076     assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1077 
1078     ReservedSpace rs(size);
1079 
1080     test_log(" rs.special() == %d", rs.special());
1081 
1082     assert(rs.base() != NULL, "Must be");
1083     assert(rs.size() == size, "Must be");
1084 
1085     if (rs.special()) {
1086       small_page_write(rs.base(), size);
1087     }
1088 
1089     release_memory_for_test(rs);
1090   }
1091 
1092   static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
1093     test_log("test_reserved_space3(%p, %p, %d)",
1094         (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);
1095 
1096     assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1097     assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");
1098 
1099     bool large = maybe_large && UseLargePages && size >= os::large_page_size();
1100 
1101     ReservedSpace rs(size, alignment, large, false);
1102 
1103     test_log(" rs.special() == %d", rs.special());
1104 
1105     assert(rs.base() != NULL, "Must be");
1106     assert(rs.size() == size, "Must be");
1107 
1108     if (rs.special()) {
1109       small_page_write(rs.base(), size);
1110     }
1111 
1112     release_memory_for_test(rs);
1113   }
1114 
1115 
1116   static void test_reserved_space1() {
1117     size_t size = 2 * 1024 * 1024;
1118     size_t ag   = os::vm_allocation_granularity();
1119 
1120     test_reserved_space1(size,      ag);
1121     test_reserved_space1(size * 2,  ag);
1122     test_reserved_space1(size * 10, ag);
1123   }
1124 
1125   static void test_reserved_space2() {
1126     size_t size = 2 * 1024 * 1024;
1127     size_t ag = os::vm_allocation_granularity();
1128 
1129     test_reserved_space2(size * 1);
1130     test_reserved_space2(size * 2);
1131     test_reserved_space2(size * 10);
1132     test_reserved_space2(ag);
1133     test_reserved_space2(size - ag);
1134     test_reserved_space2(size);
1135     test_reserved_space2(size + ag);
1136     test_reserved_space2(size * 2);
1137     test_reserved_space2(size * 2 - ag);
1138     test_reserved_space2(size * 2 + ag);
1139     test_reserved_space2(size * 3);
1140     test_reserved_space2(size * 3 - ag);
1141     test_reserved_space2(size * 3 + ag);
1142     test_reserved_space2(size * 10);
1143     test_reserved_space2(size * 10 + size / 2);
1144   }
1145 
1146   static void test_reserved_space3() {
1147     size_t ag = os::vm_allocation_granularity();
1148 
1149     test_reserved_space3(ag,      ag    , false);
1150     test_reserved_space3(ag * 2,  ag    , false);
1151     test_reserved_space3(ag * 3,  ag    , false);
1152     test_reserved_space3(ag * 2,  ag * 2, false);
1153     test_reserved_space3(ag * 4,  ag * 2, false);
1154     test_reserved_space3(ag * 8,  ag * 2, false);
1155     test_reserved_space3(ag * 4,  ag * 4, false);
1156     test_reserved_space3(ag * 8,  ag * 4, false);
1157     test_reserved_space3(ag * 16, ag * 4, false);
1158 
1159     if (UseLargePages) {
1160       size_t lp = os::large_page_size();
1161 
1162       // Without large pages
1163       test_reserved_space3(lp,     ag * 4, false);
1164       test_reserved_space3(lp * 2, ag * 4, false);
1165       test_reserved_space3(lp * 4, ag * 4, false);
1166       test_reserved_space3(lp,     lp    , false);
1167       test_reserved_space3(lp * 2, lp    , false);
1168       test_reserved_space3(lp * 3, lp    , false);
1169       test_reserved_space3(lp * 2, lp * 2, false);
1170       test_reserved_space3(lp * 4, lp * 2, false);
1171       test_reserved_space3(lp * 8, lp * 2, false);
1172 
1173       // With large pages
1174       test_reserved_space3(lp, ag * 4    , true);
1175       test_reserved_space3(lp * 2, ag * 4, true);
1176       test_reserved_space3(lp * 4, ag * 4, true);
1177       test_reserved_space3(lp, lp        , true);
1178       test_reserved_space3(lp * 2, lp    , true);
1179       test_reserved_space3(lp * 3, lp    , true);
1180       test_reserved_space3(lp * 2, lp * 2, true);
1181       test_reserved_space3(lp * 4, lp * 2, true);
1182       test_reserved_space3(lp * 8, lp * 2, true);
1183     }
1184   }
1185 
1186   static void test_reserved_space() {
1187     test_reserved_space1();
1188     test_reserved_space2();
1189     test_reserved_space3();
1190   }
1191 };
1192 
1193 void TestReservedSpace_test() {
1194   TestReservedSpace::test_reserved_space();
1195 }
1196 
1197 #define assert_equals(actual, expected)     \
1198   assert(actual == expected,                \
1199     err_msg("Got " SIZE_FORMAT " expected " \
1200       SIZE_FORMAT, actual, expected));
1201 
1202 #define assert_ge(value1, value2)                  \
1203   assert(value1 >= value2,                         \
1204     err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
1205       #value2 "': " SIZE_FORMAT, value1, value2));
1206 
1207 #define assert_lt(value1, value2)                  \
1208   assert(value1 < value2,                          \
1209     err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
1210       #value2 "': " SIZE_FORMAT, value1, value2));
1211 
1212 
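// Exercises VirtualSpace::actual_committed_size() for various reserve/commit
// size combinations, including runs where large pages are disabled for the
// commit granularity.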
1213 class TestVirtualSpace : AllStatic {
1214   enum TestLargePages {
1215     Default,
1216     Disable,
1217     Reserve,
1218     Commit
1219   };
1220 
1221   static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
1222     switch(mode) {
1223     default:
1224     case Default:
1225     case Reserve:
1226       return ReservedSpace(reserve_size_aligned);
1227     case Disable:
1228     case Commit:
1229       return ReservedSpace(reserve_size_aligned,
1230                            os::vm_allocation_granularity(),
1231                            /* large */ false, /* exec */ false);
1232     }
1233   }
1234 
1235   static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
1236     switch(mode) {
1237     default:
1238     case Default:
1239     case Reserve:
1240       return vs.initialize(rs, 0);
1241     case Disable:
1242       return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
1243     case Commit:
1244       return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
1245     }
1246   }
1247 
1248  public:
1249   static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
1250                                                         TestLargePages mode = Default) {
1251     size_t granularity = os::vm_allocation_granularity();
1252     size_t reserve_size_aligned = align_size_up(reserve_size, granularity);
1253 
1254     ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
1255 
1256     assert(reserved.is_reserved(), "Must be");
1257 
1258     VirtualSpace vs;
1259     bool initialized = initialize_virtual_space(vs, reserved, mode);
1260     assert(initialized, "Failed to initialize VirtualSpace");
1261 
1262     vs.expand_by(commit_size, false);
1263 
1264     if (vs.special()) {
1265       assert_equals(vs.actual_committed_size(), reserve_size_aligned);
1266     } else {
1267       assert_ge(vs.actual_committed_size(), commit_size);
1268       // Approximate the commit granularity.
1269       // Make sure that we don't commit using large pages
1270       // if large pages have been disabled for this VirtualSpace.
1271       size_t commit_granularity = (mode == Disable || !UseLargePages) ?
1272                                    os::vm_page_size() : os::large_page_size();
1273       assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
1274     }
1275 
1276     reserved.release();
1277   }
1278 
1279   static void test_virtual_space_actual_committed_space_one_large_page() {
1280     if (!UseLargePages) {
1281       return;
1282     }
1283 
1284     size_t large_page_size = os::large_page_size();
1285 
1286     ReservedSpace reserved(large_page_size, large_page_size, true, false);
1287 
1288     assert(reserved.is_reserved(), "Must be");
1289 
1290     VirtualSpace vs;
1291     bool initialized = vs.initialize(reserved, 0);
1292     assert(initialized, "Failed to initialize VirtualSpace");
1293 
1294     vs.expand_by(large_page_size, false);
1295 
1296     assert_equals(vs.actual_committed_size(), large_page_size);
1297 
1298     reserved.release();
1299   }
1300 
1301   static void test_virtual_space_actual_committed_space() {
1302     test_virtual_space_actual_committed_space(4 * K, 0);
1303     test_virtual_space_actual_committed_space(4 * K, 4 * K);
1304     test_virtual_space_actual_committed_space(8 * K, 0);
1305     test_virtual_space_actual_committed_space(8 * K, 4 * K);
1306     test_virtual_space_actual_committed_space(8 * K, 8 * K);
1307     test_virtual_space_actual_committed_space(12 * K, 0);
1308     test_virtual_space_actual_committed_space(12 * K, 4 * K);
1309     test_virtual_space_actual_committed_space(12 * K, 8 * K);
1310     test_virtual_space_actual_committed_space(12 * K, 12 * K);
1311     test_virtual_space_actual_committed_space(64 * K, 0);
1312     test_virtual_space_actual_committed_space(64 * K, 32 * K);
1313     test_virtual_space_actual_committed_space(64 * K, 64 * K);
1314     test_virtual_space_actual_committed_space(2 * M, 0);
1315     test_virtual_space_actual_committed_space(2 * M, 4 * K);
1316     test_virtual_space_actual_committed_space(2 * M, 64 * K);
1317     test_virtual_space_actual_committed_space(2 * M, 1 * M);
1318     test_virtual_space_actual_committed_space(2 * M, 2 * M);
1319     test_virtual_space_actual_committed_space(10 * M, 0);
1320     test_virtual_space_actual_committed_space(10 * M, 4 * K);
1321     test_virtual_space_actual_committed_space(10 * M, 8 * K);
1322     test_virtual_space_actual_committed_space(10 * M, 1 * M);
1323     test_virtual_space_actual_committed_space(10 * M, 2 * M);
1324     test_virtual_space_actual_committed_space(10 * M, 5 * M);
1325     test_virtual_space_actual_committed_space(10 * M, 10 * M);
1326   }
1327 
1328   static void test_virtual_space_disable_large_pages() {
1329     if (!UseLargePages) {
1330       return;
1331     }
1332     // These test cases verify that committing falls back to small pages if we force VirtualSpace to disable large pages.
1333     test_virtual_space_actual_committed_space(10 * M, 0, Disable);
1334     test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
1335     test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
1336     test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
1337     test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
1338     test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
1339     test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);
1340 
1341     test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
1342     test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
1343     test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
1344     test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
1345     test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
1346     test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
1347     test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);
1348 
1349     test_virtual_space_actual_committed_space(10 * M, 0, Commit);
1350     test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
1351     test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
1352     test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
1353     test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
1354     test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
1355     test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
1356   }
1357 
1358   static void test_virtual_space() {
1359     test_virtual_space_actual_committed_space();
1360     test_virtual_space_actual_committed_space_one_large_page();
1361     test_virtual_space_disable_large_pages();
1362   }
1363 };
1364 
1365 void TestVirtualSpace_test() {
1366   TestVirtualSpace::test_virtual_space();
1367 }
1368 
1369 #endif // PRODUCT
1370 
1371 #endif