1 /*
   2  * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "oops/markOop.hpp"
  27 #include "oops/oop.inline.hpp"
  28 #include "runtime/virtualspace.hpp"
  29 #include "services/memTracker.hpp"
  30 
  31 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
  32 
  33 // ReservedSpace
  34 
  35 // Dummy constructor
  36 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
  37     _alignment(0), _special(false), _executable(false) {
  38 }
  39 
  40 ReservedSpace::ReservedSpace(size_t size) {
  41   size_t page_size = os::page_size_for_region(size, 1);
  42   bool large_pages = page_size != (size_t)os::vm_page_size();
  43   // Don't force the alignment to be large page aligned,
  44   // since that will waste memory.
  45   size_t alignment = os::vm_allocation_granularity();
  46   initialize(size, alignment, large_pages, NULL, false);
  47 }
  48 
  49 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  50                              bool large,
  51                              char* requested_address) {
  52   initialize(size, alignment, large, requested_address, false);
  53 }
  54 
  55 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  56                              bool large,
  57                              bool executable) {
  58   initialize(size, alignment, large, NULL, executable);
  59 }
  60 
// Helper method: when a specific address was requested, returns true if the
// reservation failed or ended up elsewhere; in the latter case the memory is
// released again.
  62 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
  63                                            const size_t size, bool special)
  64 {
  65   if (base == requested_address || requested_address == NULL)
  66     return false; // did not fail
  67 
  68   if (base != NULL) {
    // A different reserve address may be acceptable in other cases,
    // but for compressed oops the heap should be at the requested address.
  71     assert(UseCompressedOops, "currently requested address used only for compressed oops");
  72     if (PrintCompressedOopsMode) {
  73       tty->cr();
  74       tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
  75     }
  76     // OS ignored requested address. Try different address.
  77     if (special) {
  78       if (!os::release_memory_special(base, size)) {
  79         fatal("os::release_memory_special failed");
  80       }
  81     } else {
  82       if (!os::release_memory(base, size)) {
  83         fatal("os::release_memory failed");
  84       }
  85     }
  86   }
  87   return true;
  88 }
  89 
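// Reserve 'size' bytes of virtual memory, aligned to 'alignment', optionally backed
// by large pages and optionally placed at 'requested_address'.  On failure the space
// is left unreserved (_base stays NULL), so callers must check is_reserved().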
  90 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
  91                                char* requested_address,
  92                                bool executable) {
  93   const size_t granularity = os::vm_allocation_granularity();
  94   assert((size & (granularity - 1)) == 0,
  95          "size not aligned to os::vm_allocation_granularity()");
  96   assert((alignment & (granularity - 1)) == 0,
  97          "alignment not aligned to os::vm_allocation_granularity()");
  98   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
  99          "not a power of 2");
 100 
 101   alignment = MAX2(alignment, (size_t)os::vm_page_size());
 102 
 103   _base = NULL;
 104   _size = 0;
 105   _special = false;
 106   _executable = executable;
 107   _alignment = 0;
 108   _noaccess_prefix = 0;
 109   if (size == 0) {
 110     return;
 111   }
 112 
 113   // If OS doesn't support demand paging for large page memory, we need
 114   // to use reserve_memory_special() to reserve and pin the entire region.
 115   bool special = large && !os::can_commit_large_page_memory();
 116   char* base = NULL;
 117 
 118   if (special) {
 119 
 120     base = os::reserve_memory_special(size, alignment, requested_address, executable);
 121 
 122     if (base != NULL) {
 123       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
 124         // OS ignored requested address. Try different address.
 125         return;
 126       }
 127       // Check alignment constraints.
 128       assert((uintptr_t) base % alignment == 0,
 129              err_msg("Large pages returned a non-aligned address, base: "
 130                  PTR_FORMAT " alignment: " PTR_FORMAT,
 131                  base, (void*)(uintptr_t)alignment));
 132       _special = true;
 133     } else {
 134       // failed; try to reserve regular memory below
 135       if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 136                             !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 137         if (PrintCompressedOopsMode) {
 138           tty->cr();
 139           tty->print_cr("Reserve regular memory without large pages.");
 140         }
 141       }
 142     }
 143   }
 144 
 145   if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.
 149 
 150     // If the memory was requested at a particular address, use
 151     // os::attempt_reserve_memory_at() to avoid over mapping something
 152     // important.  If available space is not detected, return NULL.
 153 
 154     if (requested_address != 0) {
 155       base = os::attempt_reserve_memory_at(size, requested_address);
 156       if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
 157         // OS ignored requested address. Try different address.
 158         base = NULL;
 159       }
 160     } else {
 161       base = os::reserve_memory(size, NULL, alignment);
 162     }
 163 
 164     if (base == NULL) return;
 165 
 166     // Check alignment constraints
 167     if ((((size_t)base) & (alignment - 1)) != 0) {
 168       // Base not aligned, retry
 169       if (!os::release_memory(base, size)) fatal("os::release_memory failed");
 170       // Make sure that size is aligned
 171       size = align_size_up(size, alignment);
 172       base = os::reserve_memory_aligned(size, alignment);
 173 
 174       if (requested_address != 0 &&
 175           failed_to_reserve_as_requested(base, requested_address, size, false)) {
 176         // As a result of the alignment constraints, the allocated base differs
 177         // from the requested address. Return back to the caller who can
 178         // take remedial action (like try again without a requested address).
 179         assert(_base == NULL, "should be");
 180         return;
 181       }
 182     }
 183   }
 184   // Done
 185   _base = base;
 186   _size = size;
 187   _alignment = alignment;
 188 
 189   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
 190          "area must be distinguishable from marks for mark-sweep");
 191   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
 192          "area must be distinguishable from marks for mark-sweep");
 193 }
 194 
 195 
 196 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
 197                              bool special, bool executable) {
 198   assert((size % os::vm_allocation_granularity()) == 0,
 199          "size not allocation aligned");
 200   _base = base;
 201   _size = size;
 202   _alignment = alignment;
 203   _noaccess_prefix = 0;
 204   _special = special;
 205   _executable = executable;
 206 }
 207 
 208 
 209 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
 210                                         bool split, bool realloc) {
 211   assert(partition_size <= size(), "partition failed");
 212   if (split) {
 213     os::split_reserved_memory(base(), size(), partition_size, realloc);
 214   }
 215   ReservedSpace result(base(), partition_size, alignment, special(),
 216                        executable());
 217   return result;
 218 }
 219 
 220 
 221 ReservedSpace
 222 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
 223   assert(partition_size <= size(), "partition failed");
 224   ReservedSpace result(base() + partition_size, size() - partition_size,
 225                        alignment, special(), executable());
 226   return result;
 227 }
 228 
 229 
 230 size_t ReservedSpace::page_align_size_up(size_t size) {
 231   return align_size_up(size, os::vm_page_size());
 232 }
 233 
 234 
 235 size_t ReservedSpace::page_align_size_down(size_t size) {
 236   return align_size_down(size, os::vm_page_size());
 237 }
 238 
 239 
 240 size_t ReservedSpace::allocation_align_size_up(size_t size) {
 241   return align_size_up(size, os::vm_allocation_granularity());
 242 }
 243 
 244 
 245 size_t ReservedSpace::allocation_align_size_down(size_t size) {
 246   return align_size_down(size, os::vm_allocation_granularity());
 247 }
 248 
 249 
 250 void ReservedSpace::release() {
 251   if (is_reserved()) {
 252     char *real_base = _base - _noaccess_prefix;
 253     const size_t real_size = _size + _noaccess_prefix;
 254     if (special()) {
 255       os::release_memory_special(real_base, real_size);
    } else {
 257       os::release_memory(real_base, real_size);
 258     }
 259     _base = NULL;
 260     _size = 0;
 261     _noaccess_prefix = 0;
 262     _special = false;
 263     _executable = false;
 264   }
 265 }
 266 
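// The noaccess prefix must be a multiple of the page size (so it can be protected)
// and of the heap alignment (so the base stays aligned once the prefix is added),
// hence the least common multiple of the two.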
 267 static size_t noaccess_prefix_size(size_t alignment) {
 268   return lcm(os::vm_page_size(), alignment);
 269 }
 270 
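// Protect a prefix at the bottom of the reserved heap so that decoding a compressed
// null oop (base + 0) faults on inaccessible memory.  This is what allows the VM to
// keep using implicit null checks when compressed oops have a non-zero base.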
 271 void ReservedSpace::establish_noaccess_prefix() {
 272   assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
 273 
 274   // ...
 275   _noaccess_prefix = noaccess_prefix_size(_alignment);
 276 
 277   if (true
 278       WIN64_ONLY(&& !UseLargePages)
 279       AIX_ONLY(&& os::vm_page_size() != SIZE_64K)) {
 280     // Protect memory at the base of the allocated region.
 281     // If special, the page was committed (only matters on windows)
 282     if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
 283       fatal("cannot protect protection page");
 284     }
 285     if (PrintCompressedOopsMode) {
 286       tty->cr();
 287       tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
 288     }
 289     assert(Universe::narrow_oop_use_implicit_null_checks() == true, "not initialized?");
 290   } else {
 291     Universe::set_narrow_oop_use_implicit_null_checks(false);
 292   }
 293 
 294   _base += _noaccess_prefix;
 295   _size -= _noaccess_prefix;
 296   assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
 297 }
 298 
 299 
 300 // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
 301 // Does not check whether the reserved memory actually is at requested_address, as the memory returned
 302 // might still fulfill the wishes of the caller.
// Ensures the memory is aligned to 'alignment'.
// NOTE: If the ReservedHeapSpace already points to some reserved memory, this is freed first.
 305 void ReservedHeapSpace::try_reserve_heap(size_t size, size_t alignment, bool large, char* requested_address) {
 306   if (_base != NULL) {
 307     // We tried before, but we didn't like the address delivered.
 308     release();
 309   }
 310 
 311   // If OS doesn't support demand paging for large page memory, we need
 312   // to use reserve_memory_special() to reserve and pin the entire region.
 313   bool special = large && !os::can_commit_large_page_memory();
 314   char* base = NULL;
 315 
 316   if (PrintCompressedOopsMode && Verbose) {
    tty->print("Trying to allocate at address " PTR_FORMAT " size " PTR_FORMAT ".\n",
 318                requested_address, (address)size);
 319   }
 320 
 321   if (special) {
 322     base = os::reserve_memory_special(size, alignment, requested_address, false);
 323 
 324     if (base != NULL) {
 325       // Check alignment constraints.
 326       assert((uintptr_t) base % alignment == 0,
 327              err_msg("Large pages returned a non-aligned address, base: "
 328                      PTR_FORMAT " alignment: " PTR_FORMAT,
 329                      base, (void*)(uintptr_t)alignment));
 330       _special = true;
 331     }
 332   }
 333 
 334   if (!base) {
 335     // Failed; try to reserve regular memory below
 336     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 337                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 338       if (PrintCompressedOopsMode) {
 339         tty->cr();
 340         tty->print_cr("Reserve regular memory without large pages.");
 341       }
 342     }
 343 
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.
 347 
 348     // If the memory was requested at a particular address, use
 349     // os::attempt_reserve_memory_at() to avoid over mapping something
 350     // important.  If available space is not detected, return NULL.
 351 
 352     if (requested_address != 0) {
 353       base = os::attempt_reserve_memory_at(size, requested_address);
 354     } else {
 355       base = os::reserve_memory(size, NULL, alignment);
 356     }
 357   }
 358   if (base == NULL) return;
 359 
 360   // Done
 361   _base = base;
 362   _size = size;
 363   _alignment = alignment;
 364 
 365   // Check alignment constraints
 366   if ((((size_t)base) & (alignment - 1)) != 0) {
 367     // Base not aligned, retry.
 368     release();
 369     return;
 370   }
 371 }
 372 
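// Compressed oop heap placement is attempted in order of decreasing benefit:
// UNSCALED (heap ends below UnscaledOopHeapMax, oops need neither base nor shift),
// ZEROBASED (heap ends below OopEncodingHeapMax, oops need only a shift),
// DISJOINTBASE (heap base bits do not overlap the shifted oop, allowing a cheaper
// decode) and finally HEAPBASED (arbitrary address, oops need base and shift).
// The latter two reserve room for a noaccess prefix in front of the heap.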
 373 void ReservedHeapSpace::initialize_compressed_heap(size_t size, size_t alignment, bool large) {
 374   guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
            "cannot allocate compressed oop heap for this size");
 376   guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
 377   assert(HeapBaseMinAddress > 0, "sanity");
 378 
 379   const size_t granularity = os::vm_allocation_granularity();
 380   assert((size & (granularity - 1)) == 0,
 381          "size not aligned to os::vm_allocation_granularity()");
 382   assert((alignment & (granularity - 1)) == 0,
 383          "alignment not aligned to os::vm_allocation_granularity()");
 384   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 385          "not a power of 2");
 386 
 387   // The necessary attach point alignment for generated wish addresses.
 388   // This is needed to increase the chance of attaching for mmap and shmat.
 389   const size_t os_attach_point_alignment =
 390     AIX_ONLY(SIZE_256M)  // Known shm boundary alignment.
 391     NOT_AIX(os::vm_allocation_granularity());
 392   const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
 393 
 394   guarantee(HeapSearchSteps > 0, "Don't set HeapSearchSteps to 0");
 395   const uint64_t num_attempts = HeapSearchSteps;
 396 
 397   char *aligned_HBMA = (char *)align_ptr_up((void *)HeapBaseMinAddress, alignment);
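  // If even a heap starting at HeapBaseMinAddress would end above OopEncodingHeapMax,
  // compressed oops will need a non-zero base, so room for a noaccess prefix has to
  // be reserved from the start.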
 398   size_t noaccess_prefix = ((aligned_HBMA + size) > (char*)OopEncodingHeapMax) ? noaccess_prefix_size(alignment) : 0;
 399 
 400   // Attempt to alloc at user-given address.
 401   if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
 402     if (PrintCompressedOopsMode && Verbose) {
 403       tty->print(" == H E A P B A S E M I N A D D R E S S ==\n");
 404     }
 405     try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_HBMA);
 406     if (_base != aligned_HBMA) { // Enforce this exact address.
 407       release();
 408     }
 409   }
 410 
 411   // Keep heap at HeapBaseMinAddress.
 412   if (!_base) {
 413 
 414     if (PrintCompressedOopsMode && Verbose) {
 415       tty->print(" == U N S C A L E D ==\n");
 416     }
 417 
    // Attempt to allocate so that we can run without base and scale (32-bit unscaled compressed oops).
    // Give it several tries from top of range to bottom.
 420     if (aligned_HBMA + size <= (char *)UnscaledOopHeapMax) {
 421 
      // Calculate the address range within which we try to attach (range of possible start addresses).
 423       char* const highest_start = (char *)align_ptr_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
 424       char* const lowest_start  = (char *)align_ptr_up  (        aligned_HBMA             , attach_point_alignment);
 425       const size_t attach_range = highest_start - lowest_start;
 426 
 427       // Cap num_attempts at possible number.
 428       const uint64_t num_attempts_possible =
 429         (attach_range / attach_point_alignment) + 1; // At least one is possible even for 0 sized attach range.
 430       const uint64_t num_attempts_to_try = MIN2(num_attempts, num_attempts_possible);
 431 
 432       const size_t stepsize = align_size_up(attach_range / num_attempts_to_try, attach_point_alignment);
 433 
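      // Example with made-up numbers: for a 1G heap and a 4G UnscaledOopHeapMax,
      // highest_start is around the 3G mark; up to HeapSearchSteps attach points,
      // stepsize apart, are then probed downwards from there until one of them
      // yields a reservation that fits completely below UnscaledOopHeapMax.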
 434       // Try attach points from top to bottom.
 435       char* attach_point = highest_start;
 436       while (attach_point >= lowest_start  &&
 437              attach_point <= highest_start &&  // Avoid wrap around.
 438              (!_base || _base < aligned_HBMA || _base + size > (char *)UnscaledOopHeapMax)) {
 439         try_reserve_heap(size, alignment, large, attach_point);
 440         attach_point -= stepsize;
 441       }
 442 
 443     }
 444 
 445     if (PrintCompressedOopsMode && Verbose) {
 446       tty->print(" == Z E R O B A S E D ==\n");
 447     }
 448 
 449     // zerobased: Attempt to allocate in the lower 32G.
    // But leave room for the compressed class space, which is allocated above
    // the heap.
 452     char *zerobased_max = (char *)OopEncodingHeapMax;
 453     // For small heaps, save some space for compressed class pointer
 454     // space so it can be decoded with no base.
 455     if (UseCompressedClassPointers && !UseSharedSpaces &&
 456         OopEncodingHeapMax <= KlassEncodingMetaspaceMax) {
 457       const size_t class_space = align_size_up(CompressedClassSpaceSize, alignment);
 458       zerobased_max = (char *)OopEncodingHeapMax - class_space;
 459     }
 460 
 461     // Give it several tries from top of range to bottom.
    if (aligned_HBMA + size <= zerobased_max &&       // Zerobased theoretically possible.
 463         (!_base ||                                    // No previous try succeeded.
 464          (_base && _base + size > zerobased_max))) {  // Unscaled delivered an arbitrary address.
 465 
      // Calculate the address range within which we try to attach (range of possible start addresses).
 467       char *const highest_start = (char *)align_ptr_down(zerobased_max - size, attach_point_alignment);
 468       // SS10 and SS12u1 cannot compile "(char *)UnscaledOopHeapMax - size" on solaris sparc 32-bit:
 469       // "Cannot use int to initialize char*." Introduce aux variable.
 470       char *unscaled_end = (char *)UnscaledOopHeapMax;
 471       unscaled_end -= size;
 472       char *lowest_start = (size < UnscaledOopHeapMax) ? MAX2(unscaled_end, aligned_HBMA) : aligned_HBMA;
 473       lowest_start  = (char *)align_ptr_up(lowest_start, attach_point_alignment);
 474       const size_t attach_range = highest_start - lowest_start;
 475 
 476       // Cap num_attempts at possible number.
 477       const uint64_t num_attempts_possible =
 478         (attach_range / attach_point_alignment) + 1; // At least one is possible even for 0 sized attach range.
 479       const uint64_t num_attempts_to_try = MIN2(num_attempts, num_attempts_possible);
 480 
 481       const size_t stepsize = align_size_up(attach_range / num_attempts_to_try, attach_point_alignment);
 482 
 483       // Try attach points from top to bottom.
 484       char* attach_point = highest_start;
 485       while (attach_point >= lowest_start  &&
 486              attach_point <= highest_start &&  // Avoid wrap around.
 487              (!_base || _base < aligned_HBMA || _base + size > zerobased_max)) {
 488         try_reserve_heap(size, alignment, large, attach_point);
 489         attach_point -= stepsize;
 490       }
 491 
 492     }
 493 
 494     if (PrintCompressedOopsMode && Verbose) {
 495       tty->print(" == D I S J O I N T B A S E ==\n");
 496     }
 497 
 498     // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
 499     // implement null checks.
 500     noaccess_prefix = noaccess_prefix_size(alignment);
 501 
 502     // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjointbase mode.
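    // Probe these addresses one by one until the reservation ends up at a base that
    // is usable for disjoint-base (or an even cheaper) encoding, or until the
    // NULL-terminated list is exhausted.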
 503     char** addresses = Universe::get_attach_addresses_for_disjoint_mode();
 504     int i = 0;
 505     while (addresses[i] &&
 506            (!_base ||
 507             (_base && _base + size > (char *)OopEncodingHeapMax &&
 508              !Universe::is_disjoint_heap_base_address((address)_base)))) {
 509       char* const attach_point = addresses[i];
 510       assert(attach_point >= aligned_HBMA, "Flag support broken");
 511       try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
 512       i++;
 513     }
 514 
 515     if (PrintCompressedOopsMode && Verbose) {
 516       tty->print(" == H E A P B A S E D ==\n");
 517     }
 518 
 519     // Last, desperate try without any placement.
 520     if (!_base) {
 521       if (PrintCompressedOopsMode && Verbose) {
        tty->print("Trying to allocate at address NULL, size " PTR_FORMAT ".\n", (address)size);
 523       }
 524       initialize(size + noaccess_prefix, alignment, large, NULL, false);
 525     }
 526   }
 527 
 528   assert(!_base || markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
 529          "area must be distinguishable from marks for mark-sweep");
 530   assert(!_base || markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
 531          "area must be distinguishable from marks for mark-sweep");
 532 }
 533 
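// Reserve the Java heap.  With compressed oops the heap is placed so that the
// cheapest possible oop encoding can be used (see initialize_compressed_heap());
// if the heap ends above OopEncodingHeapMax, a noaccess prefix is established at
// its base so implicit null checks keep working.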
 534 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large) : ReservedSpace() {
 535 
 536   if (size == 0) {
 537     return;
 538   }
 539 
 540   // Heap size should be aligned to alignment, too.
 541   guarantee(is_size_aligned(size, alignment), "set by caller");
 542 
 543   if (UseCompressedOops) {
 544     initialize_compressed_heap(size, alignment, large);
 545     if (base() && base() + size > (char *)OopEncodingHeapMax) {
 546       establish_noaccess_prefix();
 547     }
 548 
 549   } else {
 550     initialize(size, alignment, large, NULL, false);
 551   }
 552 
 553   if (base() > 0) {
 554     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
 555   }
 556 }
 557 
// Reserve space for code segment.  Same as the Java heap, only we mark this as
// executable.
 560 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
 561                                      size_t rs_align,
 562                                      bool large) :
 563   ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
 564   MemTracker::record_virtual_memory_type((address)base(), mtCode);
 565 }
 566 
 567 // VirtualSpace
 568 
 569 VirtualSpace::VirtualSpace() {
 570   _low_boundary           = NULL;
 571   _high_boundary          = NULL;
 572   _low                    = NULL;
 573   _high                   = NULL;
 574   _lower_high             = NULL;
 575   _middle_high            = NULL;
 576   _upper_high             = NULL;
 577   _lower_high_boundary    = NULL;
 578   _middle_high_boundary   = NULL;
 579   _upper_high_boundary    = NULL;
 580   _lower_alignment        = 0;
 581   _middle_alignment       = 0;
 582   _upper_alignment        = 0;
 583   _special                = false;
 584   _executable             = false;
 585 }
 586 
 587 
 588 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
 589   const size_t max_commit_granularity = os::page_size_for_region(rs.size(), 1);
 590   return initialize_with_granularity(rs, committed_size, max_commit_granularity);
 591 }
 592 
 593 bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
  if (!rs.is_reserved()) return false;  // allocation failed.
 595   assert(_low_boundary == NULL, "VirtualSpace already initialized");
 596   assert(max_commit_granularity > 0, "Granularity must be non-zero.");
 597 
 598   _low_boundary  = rs.base();
 599   _high_boundary = low_boundary() + rs.size();
 600 
 601   _low = low_boundary();
 602   _high = low();
 603 
 604   _special = rs.special();
 605   _executable = rs.executable();
 606 
 607   // When a VirtualSpace begins life at a large size, make all future expansion
 608   // and shrinking occur aligned to a granularity of large pages.  This avoids
 609   // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
 611   // page size, the only spaces that get handled this way are codecache and
 612   // the heap itself, both of which provide a substantial performance
 613   // boost in many benchmarks when covered by large pages.
 614   //
 615   // No attempt is made to force large page alignment at the very top and
 616   // bottom of the space if they are not aligned so already.
 617   _lower_alignment  = os::vm_page_size();
 618   _middle_alignment = max_commit_granularity;
 619   _upper_alignment  = os::vm_page_size();
 620 
 621   // End of each region
 622   _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
 623   _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
 624   _upper_high_boundary = high_boundary();
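  // The reserved space is thus split into three regions: the lower and upper regions
  // (below lower_high_boundary and above middle_high_boundary) are committed with
  // small pages, while the middle region in between is committed in
  // max_commit_granularity (e.g. large page) sized chunks.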
 625 
 626   // High address of each region
 627   _lower_high = low_boundary();
 628   _middle_high = lower_high_boundary();
 629   _upper_high = middle_high_boundary();
 630 
 631   // commit to initial size
 632   if (committed_size > 0) {
 633     if (!expand_by(committed_size)) {
 634       return false;
 635     }
 636   }
 637   return true;
 638 }
 639 
 640 
 641 VirtualSpace::~VirtualSpace() {
 642   release();
 643 }
 644 
 645 
 646 void VirtualSpace::release() {
 647   // This does not release memory it never reserved.
 648   // Caller must release via rs.release();
 649   _low_boundary           = NULL;
 650   _high_boundary          = NULL;
 651   _low                    = NULL;
 652   _high                   = NULL;
 653   _lower_high             = NULL;
 654   _middle_high            = NULL;
 655   _upper_high             = NULL;
 656   _lower_high_boundary    = NULL;
 657   _middle_high_boundary   = NULL;
 658   _upper_high_boundary    = NULL;
 659   _lower_alignment        = 0;
 660   _middle_alignment       = 0;
 661   _upper_alignment        = 0;
 662   _special                = false;
 663   _executable             = false;
 664 }
 665 
 666 
 667 size_t VirtualSpace::committed_size() const {
 668   return pointer_delta(high(), low(), sizeof(char));
 669 }
 670 
 671 
 672 size_t VirtualSpace::reserved_size() const {
 673   return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
 674 }
 675 
 676 
 677 size_t VirtualSpace::uncommitted_size()  const {
 678   return reserved_size() - committed_size();
 679 }
 680 
 681 size_t VirtualSpace::actual_committed_size() const {
 682   // Special VirtualSpaces commit all reserved space up front.
 683   if (special()) {
 684     return reserved_size();
 685   }
 686 
 687   size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
 688   size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
 689   size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));
 690 
 691 #ifdef ASSERT
 692   size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
 693   size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
 694   size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));
 695 
 696   if (committed_high > 0) {
 697     assert(committed_low == lower, "Must be");
 698     assert(committed_middle == middle, "Must be");
 699   }
 700 
 701   if (committed_middle > 0) {
 702     assert(committed_low == lower, "Must be");
 703   }
 704   if (committed_middle < middle) {
 705     assert(committed_high == 0, "Must be");
 706   }
 707 
 708   if (committed_low < lower) {
 709     assert(committed_high == 0, "Must be");
 710     assert(committed_middle == 0, "Must be");
 711   }
 712 #endif
 713 
 714   return committed_low + committed_middle + committed_high;
 715 }
 716 
 717 
 718 bool VirtualSpace::contains(const void* p) const {
 719   return low() <= (const char*) p && (const char*) p < high();
 720 }
 721 
/*
   First we need to determine if a particular virtual space is using large
   pages.  This is done in the initialize function and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages.  Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks.  If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
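// For example (hypothetical sizes): with 4K small pages and a 2M middle alignment,
// growing high() by 24K inside the middle region either commits nothing, because the
// new high still fits within the already committed 2M chunk, or commits exactly the
// next 2M chunk; in both cases _high itself advances by only 24K.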
 734 bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
 735   if (uncommitted_size() < bytes) return false;
 736 
 737   if (special()) {
 738     // don't commit memory if the entire space is pinned in memory
 739     _high += bytes;
 740     return true;
 741   }
 742 
 743   char* previous_high = high();
 744   char* unaligned_new_high = high() + bytes;
 745   assert(unaligned_new_high <= high_boundary(),
 746          "cannot expand by more than upper boundary");
 747 
 748   // Calculate where the new high for each of the regions should be.  If
 749   // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
 750   // then the unaligned lower and upper new highs would be the
 751   // lower_high() and upper_high() respectively.
 752   char* unaligned_lower_new_high =
 753     MIN2(unaligned_new_high, lower_high_boundary());
 754   char* unaligned_middle_new_high =
 755     MIN2(unaligned_new_high, middle_high_boundary());
 756   char* unaligned_upper_new_high =
 757     MIN2(unaligned_new_high, upper_high_boundary());
 758 
  // Align the new highs based on the regions' alignment.  lower and upper
 760   // alignment will always be default page size.  middle alignment will be
 761   // LargePageSizeInBytes if the actual size of the virtual space is in
 762   // fact larger than LargePageSizeInBytes.
 763   char* aligned_lower_new_high =
 764     (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
 765   char* aligned_middle_new_high =
 766     (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
 767   char* aligned_upper_new_high =
 768     (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
 769 
 770   // Determine which regions need to grow in this expand_by call.
 771   // If you are growing in the lower region, high() must be in that
 772   // region so calculate the size based on high().  For the middle and
 773   // upper regions, determine the starting point of growth based on the
 774   // location of high().  By getting the MAX of the region's low address
 775   // (or the previous region's high address) and high(), we can tell if it
 776   // is an intra or inter region growth.
 777   size_t lower_needs = 0;
 778   if (aligned_lower_new_high > lower_high()) {
 779     lower_needs =
 780       pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
 781   }
 782   size_t middle_needs = 0;
 783   if (aligned_middle_new_high > middle_high()) {
 784     middle_needs =
 785       pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
 786   }
 787   size_t upper_needs = 0;
 788   if (aligned_upper_new_high > upper_high()) {
 789     upper_needs =
 790       pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
 791   }
 792 
 793   // Check contiguity.
 794   assert(low_boundary() <= lower_high() &&
 795          lower_high() <= lower_high_boundary(),
 796          "high address must be contained within the region");
 797   assert(lower_high_boundary() <= middle_high() &&
 798          middle_high() <= middle_high_boundary(),
 799          "high address must be contained within the region");
 800   assert(middle_high_boundary() <= upper_high() &&
 801          upper_high() <= upper_high_boundary(),
 802          "high address must be contained within the region");
 803 
 804   // Commit regions
 805   if (lower_needs > 0) {
 806     assert(low_boundary() <= lower_high() &&
 807            lower_high() + lower_needs <= lower_high_boundary(),
 808            "must not expand beyond region");
 809     if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
 810       debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
 811                          ", lower_needs=" SIZE_FORMAT ", %d) failed",
 812                          lower_high(), lower_needs, _executable);)
 813       return false;
 814     } else {
 815       _lower_high += lower_needs;
 816     }
 817   }
 818   if (middle_needs > 0) {
 819     assert(lower_high_boundary() <= middle_high() &&
 820            middle_high() + middle_needs <= middle_high_boundary(),
 821            "must not expand beyond region");
 822     if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
 823                            _executable)) {
 824       debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
 825                          ", middle_needs=" SIZE_FORMAT ", " SIZE_FORMAT
 826                          ", %d) failed", middle_high(), middle_needs,
 827                          middle_alignment(), _executable);)
 828       return false;
 829     }
 830     _middle_high += middle_needs;
 831   }
 832   if (upper_needs > 0) {
 833     assert(middle_high_boundary() <= upper_high() &&
 834            upper_high() + upper_needs <= upper_high_boundary(),
 835            "must not expand beyond region");
 836     if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
 837       debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
 838                          ", upper_needs=" SIZE_FORMAT ", %d) failed",
 839                          upper_high(), upper_needs, _executable);)
 840       return false;
 841     } else {
 842       _upper_high += upper_needs;
 843     }
 844   }
 845 
 846   if (pre_touch || AlwaysPreTouch) {
 847     int vm_ps = os::vm_page_size();
 848     for (char* curr = previous_high;
 849          curr < unaligned_new_high;
 850          curr += vm_ps) {
 851       // Note the use of a write here; originally we tried just a read, but
 852       // since the value read was unused, the optimizer removed the read.
 853       // If we ever have a concurrent touchahead thread, we'll want to use
 854       // a read, to avoid the potential of overwriting data (if a mutator
 855       // thread beats the touchahead thread to a page).  There are various
 856       // ways of making sure this read is not optimized away: for example,
 857       // generating the code for a read procedure at runtime.
 858       *curr = 0;
 859     }
 860   }
 861 
 862   _high += bytes;
 863   return true;
 864 }
 865 
// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary, at
// which point that particular page can be uncommitted.
 869 void VirtualSpace::shrink_by(size_t size) {
 870   if (committed_size() < size)
 871     fatal("Cannot shrink virtual space to negative size");
 872 
 873   if (special()) {
 874     // don't uncommit if the entire space is pinned in memory
 875     _high -= size;
 876     return;
 877   }
 878 
 879   char* unaligned_new_high = high() - size;
 880   assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");
 881 
 882   // Calculate new unaligned address
 883   char* unaligned_upper_new_high =
 884     MAX2(unaligned_new_high, middle_high_boundary());
 885   char* unaligned_middle_new_high =
 886     MAX2(unaligned_new_high, lower_high_boundary());
 887   char* unaligned_lower_new_high =
 888     MAX2(unaligned_new_high, low_boundary());
 889 
 890   // Align address to region's alignment
 891   char* aligned_upper_new_high =
 892     (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
 893   char* aligned_middle_new_high =
 894     (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
 895   char* aligned_lower_new_high =
 896     (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
 897 
 898   // Determine which regions need to shrink
 899   size_t upper_needs = 0;
 900   if (aligned_upper_new_high < upper_high()) {
 901     upper_needs =
 902       pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
 903   }
 904   size_t middle_needs = 0;
 905   if (aligned_middle_new_high < middle_high()) {
 906     middle_needs =
 907       pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
 908   }
 909   size_t lower_needs = 0;
 910   if (aligned_lower_new_high < lower_high()) {
 911     lower_needs =
 912       pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
 913   }
 914 
 915   // Check contiguity.
 916   assert(middle_high_boundary() <= upper_high() &&
 917          upper_high() <= upper_high_boundary(),
 918          "high address must be contained within the region");
 919   assert(lower_high_boundary() <= middle_high() &&
 920          middle_high() <= middle_high_boundary(),
 921          "high address must be contained within the region");
 922   assert(low_boundary() <= lower_high() &&
 923          lower_high() <= lower_high_boundary(),
 924          "high address must be contained within the region");
 925 
 926   // Uncommit
 927   if (upper_needs > 0) {
 928     assert(middle_high_boundary() <= aligned_upper_new_high &&
 929            aligned_upper_new_high + upper_needs <= upper_high_boundary(),
 930            "must not shrink beyond region");
 931     if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
 932       debug_only(warning("os::uncommit_memory failed"));
 933       return;
 934     } else {
 935       _upper_high -= upper_needs;
 936     }
 937   }
 938   if (middle_needs > 0) {
 939     assert(lower_high_boundary() <= aligned_middle_new_high &&
 940            aligned_middle_new_high + middle_needs <= middle_high_boundary(),
 941            "must not shrink beyond region");
 942     if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
 943       debug_only(warning("os::uncommit_memory failed"));
 944       return;
 945     } else {
 946       _middle_high -= middle_needs;
 947     }
 948   }
 949   if (lower_needs > 0) {
 950     assert(low_boundary() <= aligned_lower_new_high &&
 951            aligned_lower_new_high + lower_needs <= lower_high_boundary(),
 952            "must not shrink beyond region");
 953     if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
 954       debug_only(warning("os::uncommit_memory failed"));
 955       return;
 956     } else {
 957       _lower_high -= lower_needs;
 958     }
 959   }
 960 
 961   _high -= size;
 962 }
 963 
 964 #ifndef PRODUCT
 965 void VirtualSpace::check_for_contiguity() {
 966   // Check contiguity.
 967   assert(low_boundary() <= lower_high() &&
 968          lower_high() <= lower_high_boundary(),
 969          "high address must be contained within the region");
 970   assert(lower_high_boundary() <= middle_high() &&
 971          middle_high() <= middle_high_boundary(),
 972          "high address must be contained within the region");
 973   assert(middle_high_boundary() <= upper_high() &&
 974          upper_high() <= upper_high_boundary(),
 975          "high address must be contained within the region");
 976   assert(low() >= low_boundary(), "low");
 977   assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
 978   assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
 979   assert(high() <= upper_high(), "upper high");
 980 }
 981 
 982 void VirtualSpace::print_on(outputStream* out) {
 983   out->print   ("Virtual space:");
 984   if (special()) out->print(" (pinned in memory)");
 985   out->cr();
 986   out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
 987   out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
 988   out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
 989   out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
 990 }
 991 
 992 void VirtualSpace::print() {
 993   print_on(tty);
 994 }
 995 
 996 /////////////// Unit tests ///////////////
 997 
 998 #ifndef PRODUCT
 999 
1000 #define test_log(...) \
1001   do {\
1002     if (VerboseInternalVMTests) { \
1003       tty->print_cr(__VA_ARGS__); \
1004       tty->flush(); \
1005     }\
1006   } while (false)
1007 
1008 class TestReservedSpace : AllStatic {
1009  public:
1010   static void small_page_write(void* addr, size_t size) {
1011     size_t page_size = os::vm_page_size();
1012 
1013     char* end = (char*)addr + size;
1014     for (char* p = (char*)addr; p < end; p += page_size) {
1015       *p = 1;
1016     }
1017   }
1018 
1019   static void release_memory_for_test(ReservedSpace rs) {
1020     if (rs.special()) {
1021       guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
1022     } else {
1023       guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
1024     }
1025   }
1026 
1027   static void test_reserved_space1(size_t size, size_t alignment) {
1028     test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
1029 
1030     assert(is_size_aligned(size, alignment), "Incorrect input parameters");
1031 
1032     ReservedSpace rs(size,          // size
1033                      alignment,     // alignment
1034                      UseLargePages, // large
1035                      (char *)NULL); // requested_address
1036 
1037     test_log(" rs.special() == %d", rs.special());
1038 
1039     assert(rs.base() != NULL, "Must be");
1040     assert(rs.size() == size, "Must be");
1041 
1042     assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
1043     assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
1044 
1045     if (rs.special()) {
1046       small_page_write(rs.base(), size);
1047     }
1048 
1049     release_memory_for_test(rs);
1050   }
1051 
1052   static void test_reserved_space2(size_t size) {
1053     test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
1054 
1055     assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1056 
1057     ReservedSpace rs(size);
1058 
1059     test_log(" rs.special() == %d", rs.special());
1060 
1061     assert(rs.base() != NULL, "Must be");
1062     assert(rs.size() == size, "Must be");
1063 
1064     if (rs.special()) {
1065       small_page_write(rs.base(), size);
1066     }
1067 
1068     release_memory_for_test(rs);
1069   }
1070 
1071   static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
1072     test_log("test_reserved_space3(%p, %p, %d)",
1073         (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);
1074 
1075     assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1076     assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");
1077 
1078     bool large = maybe_large && UseLargePages && size >= os::large_page_size();
1079 
1080     ReservedSpace rs(size, alignment, large, false);
1081 
1082     test_log(" rs.special() == %d", rs.special());
1083 
1084     assert(rs.base() != NULL, "Must be");
1085     assert(rs.size() == size, "Must be");
1086 
1087     if (rs.special()) {
1088       small_page_write(rs.base(), size);
1089     }
1090 
1091     release_memory_for_test(rs);
1092   }
1093 
1094 
1095   static void test_reserved_space1() {
1096     size_t size = 2 * 1024 * 1024;
1097     size_t ag   = os::vm_allocation_granularity();
1098 
1099     test_reserved_space1(size,      ag);
1100     test_reserved_space1(size * 2,  ag);
1101     test_reserved_space1(size * 10, ag);
1102   }
1103 
1104   static void test_reserved_space2() {
1105     size_t size = 2 * 1024 * 1024;
1106     size_t ag = os::vm_allocation_granularity();
1107 
1108     test_reserved_space2(size * 1);
1109     test_reserved_space2(size * 2);
1110     test_reserved_space2(size * 10);
1111     test_reserved_space2(ag);
1112     test_reserved_space2(size - ag);
1113     test_reserved_space2(size);
1114     test_reserved_space2(size + ag);
1115     test_reserved_space2(size * 2);
1116     test_reserved_space2(size * 2 - ag);
1117     test_reserved_space2(size * 2 + ag);
1118     test_reserved_space2(size * 3);
1119     test_reserved_space2(size * 3 - ag);
1120     test_reserved_space2(size * 3 + ag);
1121     test_reserved_space2(size * 10);
1122     test_reserved_space2(size * 10 + size / 2);
1123   }
1124 
1125   static void test_reserved_space3() {
1126     size_t ag = os::vm_allocation_granularity();
1127 
1128     test_reserved_space3(ag,      ag    , false);
1129     test_reserved_space3(ag * 2,  ag    , false);
1130     test_reserved_space3(ag * 3,  ag    , false);
1131     test_reserved_space3(ag * 2,  ag * 2, false);
1132     test_reserved_space3(ag * 4,  ag * 2, false);
1133     test_reserved_space3(ag * 8,  ag * 2, false);
1134     test_reserved_space3(ag * 4,  ag * 4, false);
1135     test_reserved_space3(ag * 8,  ag * 4, false);
1136     test_reserved_space3(ag * 16, ag * 4, false);
1137 
1138     if (UseLargePages) {
1139       size_t lp = os::large_page_size();
1140 
1141       // Without large pages
1142       test_reserved_space3(lp,     ag * 4, false);
1143       test_reserved_space3(lp * 2, ag * 4, false);
1144       test_reserved_space3(lp * 4, ag * 4, false);
1145       test_reserved_space3(lp,     lp    , false);
1146       test_reserved_space3(lp * 2, lp    , false);
1147       test_reserved_space3(lp * 3, lp    , false);
1148       test_reserved_space3(lp * 2, lp * 2, false);
1149       test_reserved_space3(lp * 4, lp * 2, false);
1150       test_reserved_space3(lp * 8, lp * 2, false);
1151 
1152       // With large pages
1153       test_reserved_space3(lp, ag * 4    , true);
1154       test_reserved_space3(lp * 2, ag * 4, true);
1155       test_reserved_space3(lp * 4, ag * 4, true);
1156       test_reserved_space3(lp, lp        , true);
1157       test_reserved_space3(lp * 2, lp    , true);
1158       test_reserved_space3(lp * 3, lp    , true);
1159       test_reserved_space3(lp * 2, lp * 2, true);
1160       test_reserved_space3(lp * 4, lp * 2, true);
1161       test_reserved_space3(lp * 8, lp * 2, true);
1162     }
1163   }
1164 
1165   static void test_reserved_space() {
1166     test_reserved_space1();
1167     test_reserved_space2();
1168     test_reserved_space3();
1169   }
1170 };
1171 
1172 void TestReservedSpace_test() {
1173   TestReservedSpace::test_reserved_space();
1174 }
1175 
1176 #define assert_equals(actual, expected)     \
1177   assert(actual == expected,                \
1178     err_msg("Got " SIZE_FORMAT " expected " \
1179       SIZE_FORMAT, actual, expected));
1180 
1181 #define assert_ge(value1, value2)                  \
1182   assert(value1 >= value2,                         \
1183     err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
1184       #value2 "': " SIZE_FORMAT, value1, value2));
1185 
1186 #define assert_lt(value1, value2)                  \
1187   assert(value1 < value2,                          \
1188     err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
1189       #value2 "': " SIZE_FORMAT, value1, value2));
1190 
1191 
1192 class TestVirtualSpace : AllStatic {
1193   enum TestLargePages {
1194     Default,
1195     Disable,
1196     Reserve,
1197     Commit
1198   };
1199 
1200   static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
1201     switch(mode) {
1202     default:
1203     case Default:
1204     case Reserve:
1205       return ReservedSpace(reserve_size_aligned);
1206     case Disable:
1207     case Commit:
1208       return ReservedSpace(reserve_size_aligned,
1209                            os::vm_allocation_granularity(),
1210                            /* large */ false, /* exec */ false);
1211     }
1212   }
1213 
1214   static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
1215     switch(mode) {
1216     default:
1217     case Default:
1218     case Reserve:
1219       return vs.initialize(rs, 0);
1220     case Disable:
1221       return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
1222     case Commit:
1223       return vs.initialize_with_granularity(rs, 0, os::page_size_for_region(rs.size(), 1));
1224     }
1225   }
1226 
1227  public:
1228   static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
1229                                                         TestLargePages mode = Default) {
1230     size_t granularity = os::vm_allocation_granularity();
1231     size_t reserve_size_aligned = align_size_up(reserve_size, granularity);
1232 
1233     ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
1234 
1235     assert(reserved.is_reserved(), "Must be");
1236 
1237     VirtualSpace vs;
1238     bool initialized = initialize_virtual_space(vs, reserved, mode);
1239     assert(initialized, "Failed to initialize VirtualSpace");
1240 
1241     vs.expand_by(commit_size, false);
1242 
1243     if (vs.special()) {
1244       assert_equals(vs.actual_committed_size(), reserve_size_aligned);
1245     } else {
1246       assert_ge(vs.actual_committed_size(), commit_size);
1247       // Approximate the commit granularity.
1248       // Make sure that we don't commit using large pages
      // if large pages have been disabled for this VirtualSpace.
1250       size_t commit_granularity = (mode == Disable || !UseLargePages) ?
1251                                    os::vm_page_size() : os::large_page_size();
1252       assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
1253     }
1254 
1255     reserved.release();
1256   }
1257 
1258   static void test_virtual_space_actual_committed_space_one_large_page() {
1259     if (!UseLargePages) {
1260       return;
1261     }
1262 
1263     size_t large_page_size = os::large_page_size();
1264 
1265     ReservedSpace reserved(large_page_size, large_page_size, true, false);
1266 
1267     assert(reserved.is_reserved(), "Must be");
1268 
1269     VirtualSpace vs;
1270     bool initialized = vs.initialize(reserved, 0);
1271     assert(initialized, "Failed to initialize VirtualSpace");
1272 
1273     vs.expand_by(large_page_size, false);
1274 
1275     assert_equals(vs.actual_committed_size(), large_page_size);
1276 
1277     reserved.release();
1278   }
1279 
1280   static void test_virtual_space_actual_committed_space() {
1281     test_virtual_space_actual_committed_space(4 * K, 0);
1282     test_virtual_space_actual_committed_space(4 * K, 4 * K);
1283     test_virtual_space_actual_committed_space(8 * K, 0);
1284     test_virtual_space_actual_committed_space(8 * K, 4 * K);
1285     test_virtual_space_actual_committed_space(8 * K, 8 * K);
1286     test_virtual_space_actual_committed_space(12 * K, 0);
1287     test_virtual_space_actual_committed_space(12 * K, 4 * K);
1288     test_virtual_space_actual_committed_space(12 * K, 8 * K);
1289     test_virtual_space_actual_committed_space(12 * K, 12 * K);
1290     test_virtual_space_actual_committed_space(64 * K, 0);
1291     test_virtual_space_actual_committed_space(64 * K, 32 * K);
1292     test_virtual_space_actual_committed_space(64 * K, 64 * K);
1293     test_virtual_space_actual_committed_space(2 * M, 0);
1294     test_virtual_space_actual_committed_space(2 * M, 4 * K);
1295     test_virtual_space_actual_committed_space(2 * M, 64 * K);
1296     test_virtual_space_actual_committed_space(2 * M, 1 * M);
1297     test_virtual_space_actual_committed_space(2 * M, 2 * M);
1298     test_virtual_space_actual_committed_space(10 * M, 0);
1299     test_virtual_space_actual_committed_space(10 * M, 4 * K);
1300     test_virtual_space_actual_committed_space(10 * M, 8 * K);
1301     test_virtual_space_actual_committed_space(10 * M, 1 * M);
1302     test_virtual_space_actual_committed_space(10 * M, 2 * M);
1303     test_virtual_space_actual_committed_space(10 * M, 5 * M);
1304     test_virtual_space_actual_committed_space(10 * M, 10 * M);
1305   }
1306 
1307   static void test_virtual_space_disable_large_pages() {
1308     if (!UseLargePages) {
1309       return;
1310     }
    // These test cases verify that if we force VirtualSpace to disable large
    // pages, committing is done with small pages.
1312     test_virtual_space_actual_committed_space(10 * M, 0, Disable);
1313     test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
1314     test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
1315     test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
1316     test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
1317     test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
1318     test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);
1319 
1320     test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
1321     test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
1322     test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
1323     test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
1324     test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
1325     test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
1326     test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);
1327 
1328     test_virtual_space_actual_committed_space(10 * M, 0, Commit);
1329     test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
1330     test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
1331     test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
1332     test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
1333     test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
1334     test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
1335   }
1336 
1337   static void test_virtual_space() {
1338     test_virtual_space_actual_committed_space();
1339     test_virtual_space_actual_committed_space_one_large_page();
1340     test_virtual_space_disable_large_pages();
1341   }
1342 };
1343 
1344 void TestVirtualSpace_test() {
1345   TestVirtualSpace::test_virtual_space();
1346 }
1347 
1348 #endif // PRODUCT
1349 
#endif // PRODUCT