1 /*
   2  * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "oops/markOop.hpp"
  27 #include "oops/oop.inline.hpp"
  28 #include "runtime/virtualspace.hpp"
  29 #include "services/memTracker.hpp"
  30 
  31 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
  32 
  33 // ReservedSpace
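//
// A typical usage sketch (mirroring the unit tests at the end of this file):
//
//   ReservedSpace rs(2 * M);        // reserve 2M of address space
//   VirtualSpace vs;
//   vs.initialize(rs, 0);           // nothing committed yet
//   vs.expand_by(512 * K, false);   // commit the first 512K
//   ...
//   vs.release();                   // resets the VirtualSpace bookkeeping only
//   rs.release();                   // gives the address space back to the OS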
  34 
  35 // Dummy constructor
  36 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
  37     _alignment(0), _special(false), _executable(false) {
  38 }
  39 
  40 ReservedSpace::ReservedSpace(size_t size) {
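  // os::page_size_for_region(size, 1) picks (roughly) the largest supported
  // page size that fits at least once into the region, falling back to the
  // default page size when large pages are not usable, so large_pages below
  // is true exactly when a larger-than-default page size was chosen.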
  41   size_t page_size = os::page_size_for_region(size, 1);
  42   bool large_pages = page_size != (size_t)os::vm_page_size();
  43   // Don't force the alignment to be large page aligned,
  44   // since that will waste memory.
  45   size_t alignment = os::vm_allocation_granularity();
  46   initialize(size, alignment, large_pages, NULL, false);
  47 }
  48 
  49 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  50                              bool large,
  51                              char* requested_address) {
  52   initialize(size, alignment, large, requested_address, false);
  53 }
  54 
  55 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  56                              bool large,
  57                              bool executable) {
  58   initialize(size, alignment, large, NULL, executable);
  59 }
  60 
// Helper method: returns true iff a specific address was requested but the
// reservation did not end up there.  Any memory obtained at the wrong address
// is released before returning.
  62 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
  63                                            const size_t size, bool special)
  64 {
  65   if (base == requested_address || requested_address == NULL)
  66     return false; // did not fail
  67 
  68   if (base != NULL) {
    // A different reserve address may be acceptable in other cases
    // but for compressed oops the heap should be at the requested address.
  71     assert(UseCompressedOops, "currently requested address used only for compressed oops");
  72     if (PrintCompressedOopsMode) {
  73       tty->cr();
  74       tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
  75     }
  76     // OS ignored requested address. Try different address.
  77     if (special) {
  78       if (!os::release_memory_special(base, size)) {
  79         fatal("os::release_memory_special failed");
  80       }
  81     } else {
  82       if (!os::release_memory(base, size)) {
  83         fatal("os::release_memory failed");
  84       }
  85     }
  86   }
  87   return true;
  88 }
  89 
  90 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
  91                                char* requested_address,
  92                                bool executable) {
  93   const size_t granularity = os::vm_allocation_granularity();
  94   assert((size & (granularity - 1)) == 0,
  95          "size not aligned to os::vm_allocation_granularity()");
  96   assert((alignment & (granularity - 1)) == 0,
  97          "alignment not aligned to os::vm_allocation_granularity()");
  98   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
  99          "not a power of 2");
 100 
 101   alignment = MAX2(alignment, (size_t)os::vm_page_size());
 102 
 103   _base = NULL;
 104   _size = 0;
 105   _special = false;
 106   _executable = executable;
 107   _alignment = 0;
 108   _noaccess_prefix = 0;
 109   if (size == 0) {
 110     return;
 111   }
 112 
  // If the OS doesn't support demand paging for large page memory, we need
 114   // to use reserve_memory_special() to reserve and pin the entire region.
 115   bool special = large && !os::can_commit_large_page_memory();
 116   char* base = NULL;
 117 
 118   if (special) {
 119 
 120     base = os::reserve_memory_special(size, alignment, requested_address, executable);
 121 
 122     if (base != NULL) {
 123       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
 124         // OS ignored requested address. Try different address.
 125         return;
 126       }
 127       // Check alignment constraints.
 128       assert((uintptr_t) base % alignment == 0,
 129              err_msg("Large pages returned a non-aligned address, base: "
 130                  PTR_FORMAT " alignment: " PTR_FORMAT,
 131                  base, (void*)(uintptr_t)alignment));
 132       _special = true;
 133     } else {
 134       // failed; try to reserve regular memory below
 135       if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 136                             !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 137         if (PrintCompressedOopsMode) {
 138           tty->cr();
 139           tty->print_cr("Reserve regular memory without large pages.");
 140         }
 141       }
 142     }
 143   }
 144 
 145   if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
 147     // When reserving a large address range, most OSes seem to align to at
 148     // least 64K.
 149 
 150     // If the memory was requested at a particular address, use
 151     // os::attempt_reserve_memory_at() to avoid over mapping something
 152     // important.  If available space is not detected, return NULL.
 153 
 154     if (requested_address != 0) {
 155       base = os::attempt_reserve_memory_at(size, requested_address);
 156       if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
 157         // OS ignored requested address. Try different address.
 158         base = NULL;
 159       }
 160     } else {
 161       base = os::reserve_memory(size, NULL, alignment);
 162     }
 163 
 164     if (base == NULL) return;
 165 
 166     // Check alignment constraints
 167     if ((((size_t)base) & (alignment - 1)) != 0) {
 168       // Base not aligned, retry
 169       if (!os::release_memory(base, size)) fatal("os::release_memory failed");
 170       // Make sure that size is aligned
 171       size = align_size_up(size, alignment);
 172       base = os::reserve_memory_aligned(size, alignment);
 173 
 174       if (requested_address != 0 &&
 175           failed_to_reserve_as_requested(base, requested_address, size, false)) {
 176         // As a result of the alignment constraints, the allocated base differs
 177         // from the requested address. Return back to the caller who can
 178         // take remedial action (like try again without a requested address).
 179         assert(_base == NULL, "should be");
 180         return;
 181       }
 182     }
 183   }
 184   // Done
 185   _base = base;
 186   _size = size;
 187   _alignment = alignment;
 188 
 189   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
 190          "area must be distinguishable from marks for mark-sweep");
 191   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
 192          "area must be distinguishable from marks for mark-sweep");
 193 }
 194 
 195 
 196 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
 197                              bool special, bool executable) {
 198   assert((size % os::vm_allocation_granularity()) == 0,
 199          "size not allocation aligned");
 200   _base = base;
 201   _size = size;
 202   _alignment = alignment;
 203   _noaccess_prefix = 0;
 204   _special = special;
 205   _executable = executable;
 206 }
 207 
 208 
 209 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
 210                                         bool split, bool realloc) {
 211   assert(partition_size <= size(), "partition failed");
 212   if (split) {
 213     os::split_reserved_memory(base(), size(), partition_size, realloc);
 214   }
 215   ReservedSpace result(base(), partition_size, alignment, special(),
 216                        executable());
 217   return result;
 218 }
 219 
 220 
 221 ReservedSpace
 222 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
 223   assert(partition_size <= size(), "partition failed");
 224   ReservedSpace result(base() + partition_size, size() - partition_size,
 225                        alignment, special(), executable());
 226   return result;
 227 }
 228 
 229 
 230 size_t ReservedSpace::page_align_size_up(size_t size) {
 231   return align_size_up(size, os::vm_page_size());
 232 }
 233 
 234 
 235 size_t ReservedSpace::page_align_size_down(size_t size) {
 236   return align_size_down(size, os::vm_page_size());
 237 }
 238 
 239 
 240 size_t ReservedSpace::allocation_align_size_up(size_t size) {
 241   return align_size_up(size, os::vm_allocation_granularity());
 242 }
 243 
 244 
 245 size_t ReservedSpace::allocation_align_size_down(size_t size) {
 246   return align_size_down(size, os::vm_allocation_granularity());
 247 }
 248 
 249 
 250 void ReservedSpace::release() {
 251   if (is_reserved()) {
 252     char *real_base = _base - _noaccess_prefix;
 253     const size_t real_size = _size + _noaccess_prefix;
 254     if (special()) {
 255       os::release_memory_special(real_base, real_size);
    } else {
 257       os::release_memory(real_base, real_size);
 258     }
 259     _base = NULL;
 260     _size = 0;
 261     _noaccess_prefix = 0;
 262     _alignment = 0;
 263     _special = false;
 264     _executable = false;
 265   }
 266 }
 267 
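// The noaccess prefix must cover whole OS pages (so it can be protected) and
// whole alignment units (so the heap base stays aligned once the prefix is
// skipped); hence the least common multiple.  For example, with a 4K page
// size and a 2M heap alignment the prefix is 2M.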
 268 static size_t noaccess_prefix_size(size_t alignment) {
 269   return lcm(os::vm_page_size(), alignment);
 270 }
 271 
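// With a non-zero narrow-oop base, a compressed oop is decoded roughly as
//   oop = narrow_oop_base + (narrowOop << LogMinObjAlignmentInBytes)
// so a compressed NULL that is decoded without an explicit null check points
// just above the heap base.  Protecting a prefix there turns such accesses
// into faults, which lets the VM keep using implicit null checks.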
 272 void ReservedSpace::establish_noaccess_prefix() {
 273   assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
 274 
 275   // ...
 276   _noaccess_prefix = noaccess_prefix_size(_alignment);
 277 
 278   if (true
 279       WIN64_ONLY(&& !UseLargePages)
 280       AIX_ONLY(&& os::vm_page_size() != SIZE_64K)) {
 281     // Protect memory at the base of the allocated region.
    // If special, the page was committed (only matters on Windows).
 283     if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
 284       fatal("cannot protect protection page");
 285     }
 286     if (PrintCompressedOopsMode) {
 287       tty->cr();
 288       tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
 289     }
 290     assert(Universe::narrow_oop_use_implicit_null_checks() == true, "not initialized?");
 291   } else {
 292     Universe::set_narrow_oop_use_implicit_null_checks(false);
 293   }
 294 
 295   _base += _noaccess_prefix;
 296   _size -= _noaccess_prefix;
 297   assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
 298 }
 299 
 300 
 301 // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
 302 // Does not check whether the reserved memory actually is at requested_address, as the memory returned
 303 // might still fulfill the wishes of the caller.
 304 // Assures the memory is aligned to 'alignment'.
// NOTE: If ReservedHeapSpace already points to some reserved memory, this is freed first.
 306 void ReservedHeapSpace::try_reserve_heap(size_t size, size_t alignment, bool large, char* requested_address) {
 307   if (_base != NULL) {
 308     // We tried before, but we didn't like the address delivered.
 309     release();
 310   }
 311 
  // If the OS doesn't support demand paging for large page memory, we need
 313   // to use reserve_memory_special() to reserve and pin the entire region.
 314   bool special = large && !os::can_commit_large_page_memory();
 315   char* base = NULL;
 316 
 317   if (PrintCompressedOopsMode && Verbose) {
    tty->print("Trying to allocate at address " PTR_FORMAT " size " PTR_FORMAT ".\n",
 319                requested_address, (address)size);
 320   }
 321 
 322   if (special) {
 323     base = os::reserve_memory_special(size, alignment, requested_address, false);
 324 
 325     if (base != NULL) {
 326       // Check alignment constraints.
 327       assert((uintptr_t) base % alignment == 0,
 328              err_msg("Large pages returned a non-aligned address, base: "
 329                      PTR_FORMAT " alignment: " PTR_FORMAT,
 330                      base, (void*)(uintptr_t)alignment));
 331       _special = true;
 332     }
 333   }
 334 
 335   if (base == NULL) {
 336     // Failed; try to reserve regular memory below
 337     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 338                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 339       if (PrintCompressedOopsMode) {
 340         tty->cr();
 341         tty->print_cr("Reserve regular memory without large pages.");
 342       }
 343     }
 344 
    // Optimistically assume that the OS returns an aligned base pointer.
 346     // When reserving a large address range, most OSes seem to align to at
 347     // least 64K.
 348 
 349     // If the memory was requested at a particular address, use
 350     // os::attempt_reserve_memory_at() to avoid over mapping something
 351     // important.  If available space is not detected, return NULL.
 352 
 353     if (requested_address != 0) {
 354       base = os::attempt_reserve_memory_at(size, requested_address);
 355     } else {
 356       base = os::reserve_memory(size, NULL, alignment);
 357     }
 358   }
 359   if (base == NULL) { return; }
 360 
 361   // Done
 362   _base = base;
 363   _size = size;
 364   _alignment = alignment;
 365 
 366   // Check alignment constraints
 367   if ((((size_t)base) & (alignment - 1)) != 0) {
 368     // Base not aligned, retry.
 369     release();
 370   }
 371 }
 372 
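// Walks candidate attach points from highest_start down to lowest_start in
// aligned steps, stopping as soon as a reservation lying within
// [aligned_HBMA, upper_bound] has been obtained.  A worked example (numbers
// are illustrative only): with HeapSearchSteps=3, a 4G attach range and a
// 256M attach point alignment, stepsize = align_up(4G / 3, 256M) = 1536M,
// so the addresses tried are highest_start, highest_start - 1536M and
// highest_start - 3072M.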
 373 void ReservedHeapSpace::try_reserve_range(char *const highest_start, char *lowest_start, size_t attach_point_alignment,
 374                                           char *aligned_HBMA, char *upper_bound, size_t size, size_t alignment, bool large) {
 375   guarantee(HeapSearchSteps > 0, "Don't set HeapSearchSteps to 0");
 376 
 377   const size_t attach_range = highest_start - lowest_start;
  // Cap the number of attempts at the number actually possible.
  // At least one attempt is possible even for a zero-sized attach range.
 380   const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
 381   const uint64_t num_attempts_to_try   = MIN2(HeapSearchSteps, num_attempts_possible);
 382 
 383   const size_t stepsize = align_size_up(attach_range / num_attempts_to_try, attach_point_alignment);
 384 
 385   // Try attach points from top to bottom.
 386   char* attach_point = highest_start;
 387   while (attach_point >= lowest_start  &&
 388          attach_point <= highest_start &&  // Avoid wrap around.
 389          ((_base == NULL) ||
 390           (_base < aligned_HBMA || _base + size > upper_bound))) {
 391     try_reserve_heap(size, alignment, large, attach_point);
 392     attach_point -= stepsize;
 393   }
 394 }
 395 
 396 void ReservedHeapSpace::initialize_compressed_heap(size_t size, size_t alignment, bool large) {
 397   guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
 398             "can not allocate compressed oop heap for this size");
 399   guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
 400   assert(HeapBaseMinAddress > 0, "sanity");
 401 
 402   const size_t granularity = os::vm_allocation_granularity();
 403   assert((size & (granularity - 1)) == 0,
 404          "size not aligned to os::vm_allocation_granularity()");
 405   assert((alignment & (granularity - 1)) == 0,
 406          "alignment not aligned to os::vm_allocation_granularity()");
 407   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 408          "not a power of 2");
 409 
 410   // The necessary attach point alignment for generated wish addresses.
 411   // This is needed to increase the chance of attaching for mmap and shmat.
 412   const size_t os_attach_point_alignment =
 413     AIX_ONLY(SIZE_256M)  // Known shm boundary alignment.
 414     NOT_AIX(os::vm_allocation_granularity());
 415   const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
 416 
 417   char *aligned_HBMA = (char *)align_ptr_up((void *)HeapBaseMinAddress, alignment);
 418   size_t noaccess_prefix = ((aligned_HBMA + size) > (char*)OopEncodingHeapMax) ? noaccess_prefix_size(alignment) : 0;
 419 
 420   // Attempt to alloc at user-given address.
 421   if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
 422     if (PrintCompressedOopsMode && Verbose) {
 423       tty->print(" == H E A P B A S E M I N A D D R E S S ==\n");
 424     }
 425     try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_HBMA);
 426     if (_base != aligned_HBMA) { // Enforce this exact address.
 427       release();
 428     }
 429   }
 430 
 431   // Keep heap at HeapBaseMinAddress.
 432   if (_base == NULL) {
 433 
 434     // Try to allocate the heap at addresses that allow efficient oop compression.
 435     // Different schemes are tried, in order of decreasing optimization potential.
 436     //
 437     // For this, try_reserve_heap() is called with the desired heap base addresses.
 438     // A call into the os layer to allocate at a given address can return memory
 439     // at a different address than requested.  Still, this might be memory at a useful
 440     // address. try_reserve_heap() always returns this allocated memory, as only here
 441     // the criteria for a good heap are checked.
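    //
    // The modes tried below, in decreasing order of optimization potential
    // (a sketch; limits shown for the default 8 byte object alignment):
    //   unscaled:     heap end <= UnscaledOopHeapMax (4G),  oop = narrowOop
    //   zerobased:    heap end <= OopEncodingHeapMax (32G), oop = narrowOop << 3
    //   disjointbase: heap base aligned to OopEncodingHeapMax, so the base bits
    //                 and the shifted narrowOop bits do not overlap
    //   heapbased:    any base, oop = base + (narrowOop << 3)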
 442 
 443     if (PrintCompressedOopsMode && Verbose) {
 444       tty->print(" == U N S C A L E D ==\n");
 445     }
 446 
 447     // Attempt to allocate so that we can run without base and scale (32-Bit unscaled compressed oops).
 448     // Give it several tries from top of range to bottom.
 449     if (aligned_HBMA + size <= (char *)UnscaledOopHeapMax) {
 450 
      // Calc the address range within which we try to attach (range of possible start addresses).
 452       char* const highest_start = (char *)align_ptr_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
 453       char* const lowest_start  = (char *)align_ptr_up  (        aligned_HBMA             , attach_point_alignment);
 454       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 455                         aligned_HBMA, (char *)UnscaledOopHeapMax, size, alignment, large);
 456     }
 457 
 458     if (PrintCompressedOopsMode && Verbose) {
 459       tty->print(" == Z E R O B A S E D ==\n");
 460     }
 461 
 462     // zerobased: Attempt to allocate in the lower 32G.
    // But leave room for the compressed class pointer space, which is allocated
    // above the heap.
 465     char *zerobased_max = (char *)OopEncodingHeapMax;
 466     // For small heaps, save some space for compressed class pointer
 467     // space so it can be decoded with no base.
 468     if (UseCompressedClassPointers && !UseSharedSpaces &&
 469         OopEncodingHeapMax <= KlassEncodingMetaspaceMax) {
 470       const size_t class_space = align_size_up(CompressedClassSpaceSize, alignment);
 471       zerobased_max = (char *)OopEncodingHeapMax - class_space;
 472     }
 473 
 474     // Give it several tries from top of range to bottom.
    if (aligned_HBMA + size <= zerobased_max &&    // Zerobased theoretically possible.
 476         ((_base == NULL) ||                        // No previous try succeeded.
 477          (_base + size > zerobased_max))) {        // Unscaled delivered an arbitrary address.
 478 
      // Calc the address range within which we try to attach (range of possible start addresses).
 480       char *const highest_start = (char *)align_ptr_down(zerobased_max - size, attach_point_alignment);
 481       // SS10 and SS12u1 cannot compile "(char *)UnscaledOopHeapMax - size" on solaris sparc 32-bit:
 482       // "Cannot use int to initialize char*." Introduce aux variable.
 483       char *unscaled_end = (char *)UnscaledOopHeapMax;
 484       unscaled_end -= size;
 485       char *lowest_start = (size < UnscaledOopHeapMax) ? MAX2(unscaled_end, aligned_HBMA) : aligned_HBMA;
 486       lowest_start  = (char *)align_ptr_up(lowest_start, attach_point_alignment);
 487       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 488                         aligned_HBMA, zerobased_max, size, alignment, large);
 489     }
 490 
 491     if (PrintCompressedOopsMode && Verbose) {
 492       tty->print(" == D I S J O I N T B A S E ==\n");
 493     }
 494 
 495     // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
 496     // implement null checks.
 497     noaccess_prefix = noaccess_prefix_size(alignment);
 498 
 499     // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjointbase mode.
 500     char** addresses = Universe::get_attach_addresses_for_disjoint_mode();
 501     int i = 0;
 502     while (addresses[i] &&
 503            ((_base == NULL) ||
 504             (_base + size > (char *)OopEncodingHeapMax &&
 505              !Universe::is_disjoint_heap_base_address((address)_base)))) {
 506       char* const attach_point = addresses[i];
 507       assert(attach_point >= aligned_HBMA, "Flag support broken");
 508       try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
 509       i++;
 510     }
 511 
 512     if (PrintCompressedOopsMode && Verbose) {
 513       tty->print(" == H E A P B A S E D ==\n");
 514     }
 515 
 516     // Last, desperate try without any placement.
 517     if (_base == NULL) {
 518       if (PrintCompressedOopsMode && Verbose) {
        tty->print("Trying to allocate at address NULL size " PTR_FORMAT ".\n", (address)size);
 520       }
 521       initialize(size + noaccess_prefix, alignment, large, NULL, false);
 522     }
 523   }
 524 
 525   assert(_base == NULL || markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
 526          "area must be distinguishable from marks for mark-sweep");
 527   assert(_base == NULL || markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
 528          "area must be distinguishable from marks for mark-sweep");
 529 }
 530 
 531 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large) : ReservedSpace() {
 532 
 533   if (size == 0) {
 534     return;
 535   }
 536 
 537   // Heap size should be aligned to alignment, too.
 538   guarantee(is_size_aligned(size, alignment), "set by caller");
 539 
 540   if (UseCompressedOops) {
 541     initialize_compressed_heap(size, alignment, large);
 542     if (base() && base() + size > (char *)OopEncodingHeapMax) {
 543       establish_noaccess_prefix();
 544     }
 545 
 546   } else {
 547     initialize(size, alignment, large, NULL, false);
 548   }
 549 
 550   if (base() > 0) {
 551     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
 552   }
 553 }
 554 
// Reserve space for the code segment.  Same as the Java heap, except that we
// mark this as executable.
 557 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
 558                                      size_t rs_align,
 559                                      bool large) :
 560   ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
 561   MemTracker::record_virtual_memory_type((address)base(), mtCode);
 562 }
 563 
 564 // VirtualSpace
 565 
 566 VirtualSpace::VirtualSpace() {
 567   _low_boundary           = NULL;
 568   _high_boundary          = NULL;
 569   _low                    = NULL;
 570   _high                   = NULL;
 571   _lower_high             = NULL;
 572   _middle_high            = NULL;
 573   _upper_high             = NULL;
 574   _lower_high_boundary    = NULL;
 575   _middle_high_boundary   = NULL;
 576   _upper_high_boundary    = NULL;
 577   _lower_alignment        = 0;
 578   _middle_alignment       = 0;
 579   _upper_alignment        = 0;
 580   _special                = false;
 581   _executable             = false;
 582 }
 583 
 584 
 585 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
 586   const size_t max_commit_granularity = os::page_size_for_region(rs.size(), 1);
 587   return initialize_with_granularity(rs, committed_size, max_commit_granularity);
 588 }
 589 
 590 bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
  if (!rs.is_reserved()) return false;  // allocation failed.
 592   assert(_low_boundary == NULL, "VirtualSpace already initialized");
 593   assert(max_commit_granularity > 0, "Granularity must be non-zero.");
 594 
 595   _low_boundary  = rs.base();
 596   _high_boundary = low_boundary() + rs.size();
 597 
 598   _low = low_boundary();
 599   _high = low();
 600 
 601   _special = rs.special();
 602   _executable = rs.executable();
 603 
 604   // When a VirtualSpace begins life at a large size, make all future expansion
 605   // and shrinking occur aligned to a granularity of large pages.  This avoids
 606   // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
 608   // page size, the only spaces that get handled this way are codecache and
 609   // the heap itself, both of which provide a substantial performance
 610   // boost in many benchmarks when covered by large pages.
 611   //
 612   // No attempt is made to force large page alignment at the very top and
 613   // bottom of the space if they are not aligned so already.
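  //
  // A rough picture of the three regions (boundaries coincide when the space
  // is already aligned to the commit granularity):
  //
  //   low_boundary     lower_high_boundary     middle_high_boundary     high_boundary
  //        |---- lower ----|------- middle --------|------- upper ------|
  //        (small pages)    (large pages / max       (small pages)
  //                          commit granularity)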
 614   _lower_alignment  = os::vm_page_size();
 615   _middle_alignment = max_commit_granularity;
 616   _upper_alignment  = os::vm_page_size();
 617 
 618   // End of each region
 619   _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
 620   _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
 621   _upper_high_boundary = high_boundary();
 622 
 623   // High address of each region
 624   _lower_high = low_boundary();
 625   _middle_high = lower_high_boundary();
 626   _upper_high = middle_high_boundary();
 627 
 628   // commit to initial size
 629   if (committed_size > 0) {
 630     if (!expand_by(committed_size)) {
 631       return false;
 632     }
 633   }
 634   return true;
 635 }
 636 
 637 
 638 VirtualSpace::~VirtualSpace() {
 639   release();
 640 }
 641 
 642 
 643 void VirtualSpace::release() {
  // This does not release the underlying memory; this VirtualSpace never
  // reserved it.  The caller must release it via rs.release().
 646   _low_boundary           = NULL;
 647   _high_boundary          = NULL;
 648   _low                    = NULL;
 649   _high                   = NULL;
 650   _lower_high             = NULL;
 651   _middle_high            = NULL;
 652   _upper_high             = NULL;
 653   _lower_high_boundary    = NULL;
 654   _middle_high_boundary   = NULL;
 655   _upper_high_boundary    = NULL;
 656   _lower_alignment        = 0;
 657   _middle_alignment       = 0;
 658   _upper_alignment        = 0;
 659   _special                = false;
 660   _executable             = false;
 661 }
 662 
 663 
 664 size_t VirtualSpace::committed_size() const {
 665   return pointer_delta(high(), low(), sizeof(char));
 666 }
 667 
 668 
 669 size_t VirtualSpace::reserved_size() const {
 670   return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
 671 }
 672 
 673 
 674 size_t VirtualSpace::uncommitted_size()  const {
 675   return reserved_size() - committed_size();
 676 }
 677 
 678 size_t VirtualSpace::actual_committed_size() const {
 679   // Special VirtualSpaces commit all reserved space up front.
 680   if (special()) {
 681     return reserved_size();
 682   }
 683 
 684   size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
 685   size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
 686   size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));
 687 
 688 #ifdef ASSERT
 689   size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
 690   size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
 691   size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));
 692 
 693   if (committed_high > 0) {
 694     assert(committed_low == lower, "Must be");
 695     assert(committed_middle == middle, "Must be");
 696   }
 697 
 698   if (committed_middle > 0) {
 699     assert(committed_low == lower, "Must be");
 700   }
 701   if (committed_middle < middle) {
 702     assert(committed_high == 0, "Must be");
 703   }
 704 
 705   if (committed_low < lower) {
 706     assert(committed_high == 0, "Must be");
 707     assert(committed_middle == 0, "Must be");
 708   }
 709 #endif
 710 
 711   return committed_low + committed_middle + committed_high;
 712 }
 713 
 714 
 715 bool VirtualSpace::contains(const void* p) const {
 716   return low() <= (const char*) p && (const char*) p < high();
 717 }
 718 
/*
   First we need to determine if a particular virtual space is using large
   pages.  This is done in the initialize function and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages.  Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks.  If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
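// For example (a sketch, assuming 4K small pages and a 2M commit granularity):
// expanding by 3M from a high() inside the lower region first commits 4K pages
// up to lower_high_boundary(), then commits the remainder of the request in the
// middle region rounded up to whole 2M chunks.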
 731 bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
 732   if (uncommitted_size() < bytes) return false;
 733 
 734   if (special()) {
 735     // don't commit memory if the entire space is pinned in memory
 736     _high += bytes;
 737     return true;
 738   }
 739 
 740   char* previous_high = high();
 741   char* unaligned_new_high = high() + bytes;
 742   assert(unaligned_new_high <= high_boundary(),
 743          "cannot expand by more than upper boundary");
 744 
 745   // Calculate where the new high for each of the regions should be.  If
 746   // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
 747   // then the unaligned lower and upper new highs would be the
 748   // lower_high() and upper_high() respectively.
 749   char* unaligned_lower_new_high =
 750     MIN2(unaligned_new_high, lower_high_boundary());
 751   char* unaligned_middle_new_high =
 752     MIN2(unaligned_new_high, middle_high_boundary());
 753   char* unaligned_upper_new_high =
 754     MIN2(unaligned_new_high, upper_high_boundary());
 755 
  // Align the new highs based on each region's alignment.  Lower and upper
  // alignment will always be the default page size.  Middle alignment will be
 758   // LargePageSizeInBytes if the actual size of the virtual space is in
 759   // fact larger than LargePageSizeInBytes.
 760   char* aligned_lower_new_high =
 761     (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
 762   char* aligned_middle_new_high =
 763     (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
 764   char* aligned_upper_new_high =
 765     (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
 766 
 767   // Determine which regions need to grow in this expand_by call.
 768   // If you are growing in the lower region, high() must be in that
 769   // region so calculate the size based on high().  For the middle and
 770   // upper regions, determine the starting point of growth based on the
 771   // location of high().  By getting the MAX of the region's low address
 772   // (or the previous region's high address) and high(), we can tell if it
 773   // is an intra or inter region growth.
 774   size_t lower_needs = 0;
 775   if (aligned_lower_new_high > lower_high()) {
 776     lower_needs =
 777       pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
 778   }
 779   size_t middle_needs = 0;
 780   if (aligned_middle_new_high > middle_high()) {
 781     middle_needs =
 782       pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
 783   }
 784   size_t upper_needs = 0;
 785   if (aligned_upper_new_high > upper_high()) {
 786     upper_needs =
 787       pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
 788   }
 789 
 790   // Check contiguity.
 791   assert(low_boundary() <= lower_high() &&
 792          lower_high() <= lower_high_boundary(),
 793          "high address must be contained within the region");
 794   assert(lower_high_boundary() <= middle_high() &&
 795          middle_high() <= middle_high_boundary(),
 796          "high address must be contained within the region");
 797   assert(middle_high_boundary() <= upper_high() &&
 798          upper_high() <= upper_high_boundary(),
 799          "high address must be contained within the region");
 800 
 801   // Commit regions
 802   if (lower_needs > 0) {
 803     assert(low_boundary() <= lower_high() &&
 804            lower_high() + lower_needs <= lower_high_boundary(),
 805            "must not expand beyond region");
 806     if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
 807       debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
 808                          ", lower_needs=" SIZE_FORMAT ", %d) failed",
 809                          lower_high(), lower_needs, _executable);)
 810       return false;
 811     } else {
 812       _lower_high += lower_needs;
 813     }
 814   }
 815   if (middle_needs > 0) {
 816     assert(lower_high_boundary() <= middle_high() &&
 817            middle_high() + middle_needs <= middle_high_boundary(),
 818            "must not expand beyond region");
 819     if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
 820                            _executable)) {
 821       debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
 822                          ", middle_needs=" SIZE_FORMAT ", " SIZE_FORMAT
 823                          ", %d) failed", middle_high(), middle_needs,
 824                          middle_alignment(), _executable);)
 825       return false;
 826     }
 827     _middle_high += middle_needs;
 828   }
 829   if (upper_needs > 0) {
 830     assert(middle_high_boundary() <= upper_high() &&
 831            upper_high() + upper_needs <= upper_high_boundary(),
 832            "must not expand beyond region");
 833     if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
 834       debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
 835                          ", upper_needs=" SIZE_FORMAT ", %d) failed",
 836                          upper_high(), upper_needs, _executable);)
 837       return false;
 838     } else {
 839       _upper_high += upper_needs;
 840     }
 841   }
 842 
 843   if (pre_touch || AlwaysPreTouch) {
 844     int vm_ps = os::vm_page_size();
 845     for (char* curr = previous_high;
 846          curr < unaligned_new_high;
 847          curr += vm_ps) {
 848       // Note the use of a write here; originally we tried just a read, but
 849       // since the value read was unused, the optimizer removed the read.
 850       // If we ever have a concurrent touchahead thread, we'll want to use
 851       // a read, to avoid the potential of overwriting data (if a mutator
 852       // thread beats the touchahead thread to a page).  There are various
 853       // ways of making sure this read is not optimized away: for example,
 854       // generating the code for a read procedure at runtime.
 855       *curr = 0;
 856     }
 857   }
 858 
 859   _high += bytes;
 860   return true;
 861 }
 862 
// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary, at
// which point that particular page can be uncommitted.
 866 void VirtualSpace::shrink_by(size_t size) {
 867   if (committed_size() < size)
 868     fatal("Cannot shrink virtual space to negative size");
 869 
 870   if (special()) {
 871     // don't uncommit if the entire space is pinned in memory
 872     _high -= size;
 873     return;
 874   }
 875 
 876   char* unaligned_new_high = high() - size;
 877   assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");
 878 
 879   // Calculate new unaligned address
 880   char* unaligned_upper_new_high =
 881     MAX2(unaligned_new_high, middle_high_boundary());
 882   char* unaligned_middle_new_high =
 883     MAX2(unaligned_new_high, lower_high_boundary());
 884   char* unaligned_lower_new_high =
 885     MAX2(unaligned_new_high, low_boundary());
 886 
 887   // Align address to region's alignment
 888   char* aligned_upper_new_high =
 889     (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
 890   char* aligned_middle_new_high =
 891     (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
 892   char* aligned_lower_new_high =
 893     (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
 894 
 895   // Determine which regions need to shrink
 896   size_t upper_needs = 0;
 897   if (aligned_upper_new_high < upper_high()) {
 898     upper_needs =
 899       pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
 900   }
 901   size_t middle_needs = 0;
 902   if (aligned_middle_new_high < middle_high()) {
 903     middle_needs =
 904       pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
 905   }
 906   size_t lower_needs = 0;
 907   if (aligned_lower_new_high < lower_high()) {
 908     lower_needs =
 909       pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
 910   }
 911 
 912   // Check contiguity.
 913   assert(middle_high_boundary() <= upper_high() &&
 914          upper_high() <= upper_high_boundary(),
 915          "high address must be contained within the region");
 916   assert(lower_high_boundary() <= middle_high() &&
 917          middle_high() <= middle_high_boundary(),
 918          "high address must be contained within the region");
 919   assert(low_boundary() <= lower_high() &&
 920          lower_high() <= lower_high_boundary(),
 921          "high address must be contained within the region");
 922 
 923   // Uncommit
 924   if (upper_needs > 0) {
 925     assert(middle_high_boundary() <= aligned_upper_new_high &&
 926            aligned_upper_new_high + upper_needs <= upper_high_boundary(),
 927            "must not shrink beyond region");
 928     if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
 929       debug_only(warning("os::uncommit_memory failed"));
 930       return;
 931     } else {
 932       _upper_high -= upper_needs;
 933     }
 934   }
 935   if (middle_needs > 0) {
 936     assert(lower_high_boundary() <= aligned_middle_new_high &&
 937            aligned_middle_new_high + middle_needs <= middle_high_boundary(),
 938            "must not shrink beyond region");
 939     if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
 940       debug_only(warning("os::uncommit_memory failed"));
 941       return;
 942     } else {
 943       _middle_high -= middle_needs;
 944     }
 945   }
 946   if (lower_needs > 0) {
 947     assert(low_boundary() <= aligned_lower_new_high &&
 948            aligned_lower_new_high + lower_needs <= lower_high_boundary(),
 949            "must not shrink beyond region");
 950     if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
 951       debug_only(warning("os::uncommit_memory failed"));
 952       return;
 953     } else {
 954       _lower_high -= lower_needs;
 955     }
 956   }
 957 
 958   _high -= size;
 959 }
 960 
 961 #ifndef PRODUCT
 962 void VirtualSpace::check_for_contiguity() {
 963   // Check contiguity.
 964   assert(low_boundary() <= lower_high() &&
 965          lower_high() <= lower_high_boundary(),
 966          "high address must be contained within the region");
 967   assert(lower_high_boundary() <= middle_high() &&
 968          middle_high() <= middle_high_boundary(),
 969          "high address must be contained within the region");
 970   assert(middle_high_boundary() <= upper_high() &&
 971          upper_high() <= upper_high_boundary(),
 972          "high address must be contained within the region");
 973   assert(low() >= low_boundary(), "low");
 974   assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
 975   assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
 976   assert(high() <= upper_high(), "upper high");
 977 }
 978 
 979 void VirtualSpace::print_on(outputStream* out) {
 980   out->print   ("Virtual space:");
 981   if (special()) out->print(" (pinned in memory)");
 982   out->cr();
 983   out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
 984   out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
 985   out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
 986   out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
 987 }
 988 
 989 void VirtualSpace::print() {
 990   print_on(tty);
 991 }
 992 
 993 /////////////// Unit tests ///////////////
 994 
 995 #ifndef PRODUCT
 996 
 997 #define test_log(...) \
 998   do {\
 999     if (VerboseInternalVMTests) { \
1000       tty->print_cr(__VA_ARGS__); \
1001       tty->flush(); \
1002     }\
1003   } while (false)
1004 
1005 class TestReservedSpace : AllStatic {
1006  public:
1007   static void small_page_write(void* addr, size_t size) {
1008     size_t page_size = os::vm_page_size();
1009 
1010     char* end = (char*)addr + size;
1011     for (char* p = (char*)addr; p < end; p += page_size) {
1012       *p = 1;
1013     }
1014   }
1015 
1016   static void release_memory_for_test(ReservedSpace rs) {
1017     if (rs.special()) {
1018       guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
1019     } else {
1020       guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
1021     }
1022   }
1023 
1024   static void test_reserved_space1(size_t size, size_t alignment) {
1025     test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
1026 
1027     assert(is_size_aligned(size, alignment), "Incorrect input parameters");
1028 
1029     ReservedSpace rs(size,          // size
1030                      alignment,     // alignment
1031                      UseLargePages, // large
1032                      (char *)NULL); // requested_address
1033 
1034     test_log(" rs.special() == %d", rs.special());
1035 
1036     assert(rs.base() != NULL, "Must be");
1037     assert(rs.size() == size, "Must be");
1038 
1039     assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
1040     assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
1041 
1042     if (rs.special()) {
1043       small_page_write(rs.base(), size);
1044     }
1045 
1046     release_memory_for_test(rs);
1047   }
1048 
1049   static void test_reserved_space2(size_t size) {
1050     test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
1051 
1052     assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1053 
1054     ReservedSpace rs(size);
1055 
1056     test_log(" rs.special() == %d", rs.special());
1057 
1058     assert(rs.base() != NULL, "Must be");
1059     assert(rs.size() == size, "Must be");
1060 
1061     if (rs.special()) {
1062       small_page_write(rs.base(), size);
1063     }
1064 
1065     release_memory_for_test(rs);
1066   }
1067 
1068   static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
1069     test_log("test_reserved_space3(%p, %p, %d)",
1070         (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);
1071 
1072     assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1073     assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");
1074 
1075     bool large = maybe_large && UseLargePages && size >= os::large_page_size();
1076 
1077     ReservedSpace rs(size, alignment, large, false);
1078 
1079     test_log(" rs.special() == %d", rs.special());
1080 
1081     assert(rs.base() != NULL, "Must be");
1082     assert(rs.size() == size, "Must be");
1083 
1084     if (rs.special()) {
1085       small_page_write(rs.base(), size);
1086     }
1087 
1088     release_memory_for_test(rs);
1089   }
1090 
1091 
1092   static void test_reserved_space1() {
1093     size_t size = 2 * 1024 * 1024;
1094     size_t ag   = os::vm_allocation_granularity();
1095 
1096     test_reserved_space1(size,      ag);
1097     test_reserved_space1(size * 2,  ag);
1098     test_reserved_space1(size * 10, ag);
1099   }
1100 
1101   static void test_reserved_space2() {
1102     size_t size = 2 * 1024 * 1024;
1103     size_t ag = os::vm_allocation_granularity();
1104 
1105     test_reserved_space2(size * 1);
1106     test_reserved_space2(size * 2);
1107     test_reserved_space2(size * 10);
1108     test_reserved_space2(ag);
1109     test_reserved_space2(size - ag);
1110     test_reserved_space2(size);
1111     test_reserved_space2(size + ag);
1112     test_reserved_space2(size * 2);
1113     test_reserved_space2(size * 2 - ag);
1114     test_reserved_space2(size * 2 + ag);
1115     test_reserved_space2(size * 3);
1116     test_reserved_space2(size * 3 - ag);
1117     test_reserved_space2(size * 3 + ag);
1118     test_reserved_space2(size * 10);
1119     test_reserved_space2(size * 10 + size / 2);
1120   }
1121 
1122   static void test_reserved_space3() {
1123     size_t ag = os::vm_allocation_granularity();
1124 
1125     test_reserved_space3(ag,      ag    , false);
1126     test_reserved_space3(ag * 2,  ag    , false);
1127     test_reserved_space3(ag * 3,  ag    , false);
1128     test_reserved_space3(ag * 2,  ag * 2, false);
1129     test_reserved_space3(ag * 4,  ag * 2, false);
1130     test_reserved_space3(ag * 8,  ag * 2, false);
1131     test_reserved_space3(ag * 4,  ag * 4, false);
1132     test_reserved_space3(ag * 8,  ag * 4, false);
1133     test_reserved_space3(ag * 16, ag * 4, false);
1134 
1135     if (UseLargePages) {
1136       size_t lp = os::large_page_size();
1137 
1138       // Without large pages
1139       test_reserved_space3(lp,     ag * 4, false);
1140       test_reserved_space3(lp * 2, ag * 4, false);
1141       test_reserved_space3(lp * 4, ag * 4, false);
1142       test_reserved_space3(lp,     lp    , false);
1143       test_reserved_space3(lp * 2, lp    , false);
1144       test_reserved_space3(lp * 3, lp    , false);
1145       test_reserved_space3(lp * 2, lp * 2, false);
1146       test_reserved_space3(lp * 4, lp * 2, false);
1147       test_reserved_space3(lp * 8, lp * 2, false);
1148 
1149       // With large pages
1150       test_reserved_space3(lp, ag * 4    , true);
1151       test_reserved_space3(lp * 2, ag * 4, true);
1152       test_reserved_space3(lp * 4, ag * 4, true);
1153       test_reserved_space3(lp, lp        , true);
1154       test_reserved_space3(lp * 2, lp    , true);
1155       test_reserved_space3(lp * 3, lp    , true);
1156       test_reserved_space3(lp * 2, lp * 2, true);
1157       test_reserved_space3(lp * 4, lp * 2, true);
1158       test_reserved_space3(lp * 8, lp * 2, true);
1159     }
1160   }
1161 
1162   static void test_reserved_space() {
1163     test_reserved_space1();
1164     test_reserved_space2();
1165     test_reserved_space3();
1166   }
1167 };
1168 
1169 void TestReservedSpace_test() {
1170   TestReservedSpace::test_reserved_space();
1171 }
1172 
1173 #define assert_equals(actual, expected)     \
1174   assert(actual == expected,                \
1175     err_msg("Got " SIZE_FORMAT " expected " \
1176       SIZE_FORMAT, actual, expected));
1177 
1178 #define assert_ge(value1, value2)                  \
1179   assert(value1 >= value2,                         \
1180     err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
1181       #value2 "': " SIZE_FORMAT, value1, value2));
1182 
1183 #define assert_lt(value1, value2)                  \
1184   assert(value1 < value2,                          \
1185     err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
1186       #value2 "': " SIZE_FORMAT, value1, value2));
1187 
1188 
1189 class TestVirtualSpace : AllStatic {
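  // Modes for the helpers below: Default and Reserve let ReservedSpace pick the
  // page size (possibly large) and use the default commit granularity; Disable
  // reserves with small pages and forces small-page commits; Commit reserves
  // with small pages but commits with the page size picked for the whole region.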
1190   enum TestLargePages {
1191     Default,
1192     Disable,
1193     Reserve,
1194     Commit
1195   };
1196 
1197   static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
1198     switch(mode) {
1199     default:
1200     case Default:
1201     case Reserve:
1202       return ReservedSpace(reserve_size_aligned);
1203     case Disable:
1204     case Commit:
1205       return ReservedSpace(reserve_size_aligned,
1206                            os::vm_allocation_granularity(),
1207                            /* large */ false, /* exec */ false);
1208     }
1209   }
1210 
1211   static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
1212     switch(mode) {
1213     default:
1214     case Default:
1215     case Reserve:
1216       return vs.initialize(rs, 0);
1217     case Disable:
1218       return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
1219     case Commit:
1220       return vs.initialize_with_granularity(rs, 0, os::page_size_for_region(rs.size(), 1));
1221     }
1222   }
1223 
1224  public:
1225   static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
1226                                                         TestLargePages mode = Default) {
1227     size_t granularity = os::vm_allocation_granularity();
1228     size_t reserve_size_aligned = align_size_up(reserve_size, granularity);
1229 
1230     ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
1231 
1232     assert(reserved.is_reserved(), "Must be");
1233 
1234     VirtualSpace vs;
1235     bool initialized = initialize_virtual_space(vs, reserved, mode);
1236     assert(initialized, "Failed to initialize VirtualSpace");
1237 
1238     vs.expand_by(commit_size, false);
1239 
1240     if (vs.special()) {
1241       assert_equals(vs.actual_committed_size(), reserve_size_aligned);
1242     } else {
1243       assert_ge(vs.actual_committed_size(), commit_size);
1244       // Approximate the commit granularity.
      // Make sure that we don't commit using large pages
      // if large pages have been disabled for this VirtualSpace.
1247       size_t commit_granularity = (mode == Disable || !UseLargePages) ?
1248                                    os::vm_page_size() : os::large_page_size();
1249       assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
1250     }
1251 
1252     reserved.release();
1253   }
1254 
1255   static void test_virtual_space_actual_committed_space_one_large_page() {
1256     if (!UseLargePages) {
1257       return;
1258     }
1259 
1260     size_t large_page_size = os::large_page_size();
1261 
1262     ReservedSpace reserved(large_page_size, large_page_size, true, false);
1263 
1264     assert(reserved.is_reserved(), "Must be");
1265 
1266     VirtualSpace vs;
1267     bool initialized = vs.initialize(reserved, 0);
1268     assert(initialized, "Failed to initialize VirtualSpace");
1269 
1270     vs.expand_by(large_page_size, false);
1271 
1272     assert_equals(vs.actual_committed_size(), large_page_size);
1273 
1274     reserved.release();
1275   }
1276 
1277   static void test_virtual_space_actual_committed_space() {
1278     test_virtual_space_actual_committed_space(4 * K, 0);
1279     test_virtual_space_actual_committed_space(4 * K, 4 * K);
1280     test_virtual_space_actual_committed_space(8 * K, 0);
1281     test_virtual_space_actual_committed_space(8 * K, 4 * K);
1282     test_virtual_space_actual_committed_space(8 * K, 8 * K);
1283     test_virtual_space_actual_committed_space(12 * K, 0);
1284     test_virtual_space_actual_committed_space(12 * K, 4 * K);
1285     test_virtual_space_actual_committed_space(12 * K, 8 * K);
1286     test_virtual_space_actual_committed_space(12 * K, 12 * K);
1287     test_virtual_space_actual_committed_space(64 * K, 0);
1288     test_virtual_space_actual_committed_space(64 * K, 32 * K);
1289     test_virtual_space_actual_committed_space(64 * K, 64 * K);
1290     test_virtual_space_actual_committed_space(2 * M, 0);
1291     test_virtual_space_actual_committed_space(2 * M, 4 * K);
1292     test_virtual_space_actual_committed_space(2 * M, 64 * K);
1293     test_virtual_space_actual_committed_space(2 * M, 1 * M);
1294     test_virtual_space_actual_committed_space(2 * M, 2 * M);
1295     test_virtual_space_actual_committed_space(10 * M, 0);
1296     test_virtual_space_actual_committed_space(10 * M, 4 * K);
1297     test_virtual_space_actual_committed_space(10 * M, 8 * K);
1298     test_virtual_space_actual_committed_space(10 * M, 1 * M);
1299     test_virtual_space_actual_committed_space(10 * M, 2 * M);
1300     test_virtual_space_actual_committed_space(10 * M, 5 * M);
1301     test_virtual_space_actual_committed_space(10 * M, 10 * M);
1302   }
1303 
1304   static void test_virtual_space_disable_large_pages() {
1305     if (!UseLargePages) {
1306       return;
1307     }
    // These test cases verify the actual committed size when we force
    // VirtualSpace to disable large pages (and, for comparison, when we do not).
1309     test_virtual_space_actual_committed_space(10 * M, 0, Disable);
1310     test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
1311     test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
1312     test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
1313     test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
1314     test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
1315     test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);
1316 
1317     test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
1318     test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
1319     test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
1320     test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
1321     test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
1322     test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
1323     test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);
1324 
1325     test_virtual_space_actual_committed_space(10 * M, 0, Commit);
1326     test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
1327     test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
1328     test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
1329     test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
1330     test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
1331     test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
1332   }
1333 
1334   static void test_virtual_space() {
1335     test_virtual_space_actual_committed_space();
1336     test_virtual_space_actual_committed_space_one_large_page();
1337     test_virtual_space_disable_large_pages();
1338   }
1339 };
1340 
1341 void TestVirtualSpace_test() {
1342   TestVirtualSpace::test_virtual_space();
1343 }
1344 
1345 #endif // PRODUCT
1346 
1347 #endif