/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_aix
# include "os_aix.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

// ReservedSpace

// Dummy constructor
ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
    _alignment(0), _special(false), _executable(false) {
}

ReservedSpace::ReservedSpace(size_t size) {
  size_t page_size = os::page_size_for_region(size, size, 1);
  bool large_pages = page_size != (size_t)os::vm_page_size();
  // Don't force the alignment to be large page aligned,
  // since that will waste memory.
  size_t alignment = os::vm_allocation_granularity();
  initialize(size, alignment, large_pages, NULL, 0, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             char* requested_address,
                             const size_t noaccess_prefix) {
  initialize(size+noaccess_prefix, alignment, large, requested_address,
             noaccess_prefix, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             bool executable) {
  initialize(size, alignment, large, NULL, 0, executable);
}
// Helper method: returns true if the reservation ended up somewhere other
// than the (non-NULL) requested address, releasing any stray reservation so
// the caller can retry; returns false if the request was satisfied or no
// particular address was requested.
static bool failed_to_reserve_as_requested(char* base, char* requested_address,
                                           const size_t size, bool special)
{
  if (base == requested_address || requested_address == NULL)
    return false; // did not fail

  if (base != NULL) {
    // A different reserve address may be acceptable in other cases,
    // but for compressed oops the heap should be at the requested address.
    assert(UseCompressedOops, "currently requested address used only for compressed oops");
    if (PrintCompressedOopsMode) {
      tty->cr();
      tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
    }
    // OS ignored requested address. Try different address.
    if (special) {
      if (!os::release_memory_special(base, size)) {
        fatal("os::release_memory_special failed");
      }
    } else {
      if (!os::release_memory(base, size)) {
        fatal("os::release_memory failed");
      }
    }
  }
  return true;
}

void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               const size_t noaccess_prefix,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  alignment = MAX2(alignment, (size_t)os::vm_page_size());

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == alignment, "noaccess prefix wrong");

  _base = NULL;
  _size = 0;
  _special = false;
  _executable = executable;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }
  // If the OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  if (requested_address != 0) {
    requested_address -= noaccess_prefix; // adjust requested address
    assert(requested_address != NULL, "huge noaccess prefix?");
  }
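  // Note on the adjustment above: the caller passed the address where the
  // usable space (e.g. the heap base) should end up.  Since
  // protect_noaccess_prefix() later advances _base by noaccess_prefix, the
  // reservation itself must start one prefix below the requested address.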

  if (special) {

    base = os::reserve_memory_special(size, alignment, requested_address, executable);

    if (base != NULL) {
      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
        // OS ignored the requested address; give up and let the caller retry.
        return;
      }
      // Check alignment constraints.
      assert((uintptr_t) base % alignment == 0,
             err_msg("Large pages returned a non-aligned address, base: "
                 PTR_FORMAT " alignment: " PTR_FORMAT,
                 base, (void*)(uintptr_t)alignment));
      _special = true;
    } else {
      // failed; try to reserve regular memory below
      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
        if (PrintCompressedOopsMode) {
          tty->cr();
          tty->print_cr("Reserve regular memory without large pages.");
        }
      }
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid overmapping something
    // important.  If the requested range is not available, NULL is returned.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // OS ignored requested address. Try different address.
        base = NULL;
      }
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints
    if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
      // Base not aligned, retry
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      // Make sure that size is aligned
      size = align_size_up(size, alignment);
      base = os::reserve_memory_aligned(size, alignment);

      if (requested_address != 0 &&
          failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address. Return to the caller, who can
        // take remedial action (like trying again without a requested address).
        assert(_base == NULL, "should be");
        return;
      }
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = noaccess_prefix;

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == _alignment, "noaccess prefix wrong");

  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");
}


ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                             bool special, bool executable) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = 0;
  _special = special;
  _executable = executable;
}


ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
                                        bool split, bool realloc) {
  assert(partition_size <= size(), "partition failed");
  if (split) {
    os::split_reserved_memory(base(), size(), partition_size, realloc);
  }
  ReservedSpace result(base(), partition_size, alignment, special(),
                       executable());
  return result;
}


ReservedSpace
ReservedSpace::last_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base() + partition_size, size() - partition_size,
                       alignment, special(), executable());
  return result;
}


size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_size_up(size, os::vm_page_size());
}


size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_size_down(size, os::vm_page_size());
}


size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_size_up(size, os::vm_allocation_granularity());
}


size_t ReservedSpace::allocation_align_size_down(size_t size) {
  return align_size_down(size, os::vm_allocation_granularity());
}
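
// Note: on most POSIX platforms os::vm_allocation_granularity() equals
// os::vm_page_size(), so the page- and allocation-aligned helpers above
// coincide; on Windows the allocation granularity is typically 64K while
// pages are 4K, which is why both families of helpers exist.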


void ReservedSpace::release() {
  if (is_reserved()) {
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
    if (special()) {
      os::release_memory_special(real_base, real_size);
    } else {
      os::release_memory(real_base, real_size);
    }
    _base = NULL;
    _size = 0;
    _noaccess_prefix = 0;
    _special = false;
    _executable = false;
  }
}

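// The noaccess prefix is a run of pages at the bottom of the reservation
// that is protected MEM_PROT_NONE.  With compressed oops and a non-zero
// heap base, decoding a narrow oop of 0 yields the heap base itself;
// keeping that address range inaccessible lets compiled code rely on
// implicit null checks (a trap) instead of explicit NULL tests.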
void ReservedSpace::protect_noaccess_prefix(const size_t size) {
  assert( (_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
                                      (Universe::narrow_oop_base() != NULL) &&
                                      Universe::narrow_oop_use_implicit_null_checks()),
         "noaccess_prefix should be used only with non zero based compressed oops");

  // If there is no noaccess prefix, return.
  if (_noaccess_prefix == 0) return;

  assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
         "must be at least page size big");

  // Protect memory at the base of the allocated region.
  // If special, the page was committed (only matters on Windows)
  if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
                          _special)) {
    fatal("cannot protect protection page");
  }
  if (PrintCompressedOopsMode) {
    tty->cr();
    tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
  }

  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
         "must be exactly of required size and alignment");
}

ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
                                     bool large, char* requested_address) :
  ReservedSpace(size, alignment, large,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), alignment) : 0) {
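  // The prefix, when used, is lcm(page size, alignment): it must be a
  // multiple of the page size so it can be protected, and a multiple of
  // the alignment so the heap base stays aligned once the prefix is
  // stripped off.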
  if (base() != NULL) {
    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
  }

  // Only the reserved space for the Java heap should have a noaccess_prefix
  // when using compressed oops.
  protect_noaccess_prefix(size);
}

// Reserve space for the code segment.  Same as the Java heap, except we mark
// this space as executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     bool large) :
  ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
  MemTracker::record_virtual_memory_type((address)base(), mtCode);
}

// VirtualSpace

VirtualSpace::VirtualSpace() {
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  const size_t max_commit_granularity = os::page_size_for_region(rs.size(), rs.size(), 1);
  return initialize_with_granularity(rs, committed_size, max_commit_granularity);
}
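
// Typical usage (sketch; variable names are illustrative): reserve an
// address range up front, then commit physical memory lazily as the space
// grows.
//
//   ReservedSpace rs(reserved_bytes);          // reserve only
//   VirtualSpace vs;
//   if (vs.initialize(rs, committed_bytes)) {  // commit the initial part
//     ...
//     vs.expand_by(more_bytes, false);         // commit more on demand
//   }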

bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
  if (!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  assert(max_commit_granularity > 0, "Granularity must be non-zero.");

  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = max_commit_granularity;
  _upper_alignment  = os::vm_page_size();
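
  // Schematic of the three regions (addresses grow to the right):
  //
  //  low_boundary    lower_high_boundary     middle_high_boundary  high_boundary
  //       |       lower       |        middle        |      upper       |
  //       +-------------------+----------------------+------------------+
  //        small-page commits     large-page commits    small-page commits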

  // End of each region
  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // commit to initial size
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}


VirtualSpace::~VirtualSpace() {
  release();
}


void VirtualSpace::release() {
  // This does not release the underlying memory: the VirtualSpace never
  // reserved it.  The caller must release it via rs.release().
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}


size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}


size_t VirtualSpace::uncommitted_size() const {
  return reserved_size() - committed_size();
}

size_t VirtualSpace::actual_committed_size() const {
  // Special VirtualSpaces commit all reserved space up front.
  if (special()) {
    return reserved_size();
  }

  size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
  size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
  size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));

#ifdef ASSERT
  size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
  size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
  size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));

  if (committed_high > 0) {
    assert(committed_low == lower, "Must be");
    assert(committed_middle == middle, "Must be");
  }

  if (committed_middle > 0) {
    assert(committed_low == lower, "Must be");
  }
  if (committed_middle < middle) {
    assert(committed_high == 0, "Must be");
  }

  if (committed_low < lower) {
    assert(committed_high == 0, "Must be");
    assert(committed_middle == 0, "Must be");
  }
#endif

  return committed_low + committed_middle + committed_high;
}


bool VirtualSpace::contains(const void* p) const {
  return low() <= (const char*) p && (const char*) p < high();
}

/*
   First we need to determine whether a particular virtual space is using
   large pages.  This is done in the initialize function, and only virtual
   spaces that are larger than LargePageSizeInBytes use large pages.  Once
   we have determined this, all expand_by and shrink_by calls must grow and
   shrink by large-page-size chunks.  If a particular request is within the
   current large page, the call to commit or uncommit memory can be skipped.
   In the case that the low and high boundaries of this space are not large
   page aligned, the pages leading up to the first large page address and
   the pages after the last large page address must be allocated with
   default (small) pages.
*/
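//
// For example (illustrative numbers only): with 4K small pages and a 2M
// commit granularity, a space spanning [base, base + 10M) where base is
// 1M past a 2M boundary gets a 1M lower region committed in 4K chunks,
// an 8M middle region committed in 2M chunks, and a 1M upper region
// committed in 4K chunks.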
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) return false;

  if (special()) {
    // don't commit memory if the entire space is pinned in memory
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(),
         "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =
    MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high =
    MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =
    MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on each region's alignment.  Lower and upper
  // alignment will always be the default page size.  Middle alignment will
  // be LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By getting the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra or inter region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs =
      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs =
      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs =
      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions
  if (lower_needs > 0) {
    assert(low_boundary() <= lower_high() &&
           lower_high() + lower_needs <= lower_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", lower_needs=" SIZE_FORMAT ", %d) failed",
                         lower_high(), lower_needs, _executable);)
      return false;
    } else {
      _lower_high += lower_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= middle_high() &&
           middle_high() + middle_needs <= middle_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
                           _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", middle_needs=" SIZE_FORMAT ", " SIZE_FORMAT
                         ", %d) failed", middle_high(), middle_needs,
                         middle_alignment(), _executable);)
      return false;
    }
    _middle_high += middle_needs;
  }
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= upper_high() &&
           upper_high() + upper_needs <= upper_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", upper_needs=" SIZE_FORMAT ", %d) failed",
                         upper_high(), upper_needs, _executable);)
      return false;
    } else {
      _upper_high += upper_needs;
    }
  }

  if (pre_touch || AlwaysPreTouch) {
    int vm_ps = os::vm_page_size();
    for (char* curr = previous_high;
         curr < unaligned_new_high;
         curr += vm_ps) {
      // Note the use of a write here; originally we tried just a read, but
      // since the value read was unused, the optimizer removed the read.
      // If we ever have a concurrent touchahead thread, we'll want to use
      // a read, to avoid the potential of overwriting data (if a mutator
      // thread beats the touchahead thread to a page).  There are various
      // ways of making sure this read is not optimized away: for example,
      // generating the code for a read procedure at runtime.
      *curr = 0;
    }
  }

  _high += bytes;
  return true;
}

// A page is uncommitted if the contents of the entire page are deemed
// unusable.  Continue to decrement the high() pointer until it reaches a
// page boundary, in which case that particular page can now be uncommitted.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // don't uncommit if the entire space is pinned in memory
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align address to region's alignment
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}

#ifndef PRODUCT
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}

void VirtualSpace::print_on(outputStream* out) {
  out->print("Virtual space:");
  if (special()) out->print(" (pinned in memory)");
  out->cr();
  out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
  out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
  out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
}

void VirtualSpace::print() {
  print_on(tty);
}

/////////////// Unit tests ///////////////

#ifndef PRODUCT

#define test_log(...) \
  do {\
    if (VerboseInternalVMTests) { \
      tty->print_cr(__VA_ARGS__); \
      tty->flush(); \
    }\
  } while (false)
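// Note: test_log output appears only when VerboseInternalVMTests is enabled.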

class TestReservedSpace : AllStatic {
 public:
  static void small_page_write(void* addr, size_t size) {
    size_t page_size = os::vm_page_size();

    char* end = (char*)addr + size;
    for (char* p = (char*)addr; p < end; p += page_size) {
      *p = 1;
    }
  }

  static void release_memory_for_test(ReservedSpace rs) {
    if (rs.special()) {
      guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
    } else {
      guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
    }
  }

  static void test_reserved_space1(size_t size, size_t alignment) {
    test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);

    assert(is_size_aligned(size, alignment), "Incorrect input parameters");

    ReservedSpace rs(size,          // size
                     alignment,     // alignment
                     UseLargePages, // large
                     NULL,          // requested_address
                     0);            // noaccess_prefix

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
    assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }

  static void test_reserved_space2(size_t size) {
    test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);

    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");

    ReservedSpace rs(size);

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }

  static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
    test_log("test_reserved_space3(%p, %p, %d)",
        (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);

    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
    assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");

    bool large = maybe_large && UseLargePages && size >= os::large_page_size();

    ReservedSpace rs(size, alignment, large, false);

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }


  static void test_reserved_space1() {
    size_t size = 2 * 1024 * 1024;
    size_t ag   = os::vm_allocation_granularity();

    test_reserved_space1(size,      ag);
    test_reserved_space1(size * 2,  ag);
    test_reserved_space1(size * 10, ag);
  }

  static void test_reserved_space2() {
    size_t size = 2 * 1024 * 1024;
    size_t ag = os::vm_allocation_granularity();

    test_reserved_space2(size * 1);
    test_reserved_space2(size * 2);
    test_reserved_space2(size * 10);
    test_reserved_space2(ag);
    test_reserved_space2(size - ag);
    test_reserved_space2(size);
    test_reserved_space2(size + ag);
    test_reserved_space2(size * 2);
    test_reserved_space2(size * 2 - ag);
    test_reserved_space2(size * 2 + ag);
    test_reserved_space2(size * 3);
    test_reserved_space2(size * 3 - ag);
    test_reserved_space2(size * 3 + ag);
    test_reserved_space2(size * 10);
    test_reserved_space2(size * 10 + size / 2);
  }

  static void test_reserved_space3() {
    size_t ag = os::vm_allocation_granularity();

    test_reserved_space3(ag,      ag    , false);
    test_reserved_space3(ag * 2,  ag    , false);
    test_reserved_space3(ag * 3,  ag    , false);
    test_reserved_space3(ag * 2,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 2, false);
    test_reserved_space3(ag * 8,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 4, false);
    test_reserved_space3(ag * 8,  ag * 4, false);
    test_reserved_space3(ag * 16, ag * 4, false);

    if (UseLargePages) {
      size_t lp = os::large_page_size();

      // Without large pages
      test_reserved_space3(lp,     ag * 4, false);
      test_reserved_space3(lp * 2, ag * 4, false);
      test_reserved_space3(lp * 4, ag * 4, false);
      test_reserved_space3(lp,     lp    , false);
      test_reserved_space3(lp * 2, lp    , false);
      test_reserved_space3(lp * 3, lp    , false);
      test_reserved_space3(lp * 2, lp * 2, false);
      test_reserved_space3(lp * 4, lp * 2, false);
      test_reserved_space3(lp * 8, lp * 2, false);

      // With large pages
      test_reserved_space3(lp, ag * 4    , true);
      test_reserved_space3(lp * 2, ag * 4, true);
      test_reserved_space3(lp * 4, ag * 4, true);
      test_reserved_space3(lp, lp        , true);
      test_reserved_space3(lp * 2, lp    , true);
      test_reserved_space3(lp * 3, lp    , true);
      test_reserved_space3(lp * 2, lp * 2, true);
      test_reserved_space3(lp * 4, lp * 2, true);
      test_reserved_space3(lp * 8, lp * 2, true);
    }
  }

  static void test_reserved_space() {
    test_reserved_space1();
    test_reserved_space2();
    test_reserved_space3();
  }
};

void TestReservedSpace_test() {
  TestReservedSpace::test_reserved_space();
}

#define assert_equals(actual, expected)     \
  assert(actual == expected,                \
    err_msg("Got " SIZE_FORMAT " expected " \
      SIZE_FORMAT, actual, expected));

#define assert_ge(value1, value2)                  \
  assert(value1 >= value2,                         \
    err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
      #value2 "': " SIZE_FORMAT, value1, value2));

#define assert_lt(value1, value2)                  \
  assert(value1 < value2,                          \
    err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
      #value2 "': " SIZE_FORMAT, value1, value2));


class TestVirtualSpace : AllStatic {
  enum TestLargePages {
    Default,
    Disable,
    Reserve,
    Commit
  };

  static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
    switch (mode) {
    default:
    case Default:
    case Reserve:
      return ReservedSpace(reserve_size_aligned);
    case Disable:
    case Commit:
      return ReservedSpace(reserve_size_aligned,
                           os::vm_allocation_granularity(),
                           /* large */ false, /* exec */ false);
    }
  }

  static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
    switch (mode) {
    default:
    case Default:
    case Reserve:
      return vs.initialize(rs, 0);
    case Disable:
      return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
    case Commit:
      return vs.initialize_with_granularity(rs, 0, os::page_size_for_region(rs.size(), rs.size(), 1));
    }
  }

 public:
  static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
                                                        TestLargePages mode = Default) {
    size_t granularity = os::vm_allocation_granularity();
    size_t reserve_size_aligned = align_size_up(reserve_size, granularity);

    ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);

    assert(reserved.is_reserved(), "Must be");

    VirtualSpace vs;
    bool initialized = initialize_virtual_space(vs, reserved, mode);
    assert(initialized, "Failed to initialize VirtualSpace");

    vs.expand_by(commit_size, false);

    if (vs.special()) {
      assert_equals(vs.actual_committed_size(), reserve_size_aligned);
    } else {
      assert_ge(vs.actual_committed_size(), commit_size);
      // Approximate the commit granularity.
      // Make sure that we don't commit using large pages
      // if large pages have been disabled for this VirtualSpace.
      size_t commit_granularity = (mode == Disable || !UseLargePages) ?
                                   os::vm_page_size() : os::large_page_size();
      assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
    }

    reserved.release();
  }

  static void test_virtual_space_actual_committed_space_one_large_page() {
    if (!UseLargePages) {
      return;
    }

    size_t large_page_size = os::large_page_size();

    ReservedSpace reserved(large_page_size, large_page_size, true, false);

    assert(reserved.is_reserved(), "Must be");

    VirtualSpace vs;
    bool initialized = vs.initialize(reserved, 0);
    assert(initialized, "Failed to initialize VirtualSpace");

    vs.expand_by(large_page_size, false);

    assert_equals(vs.actual_committed_size(), large_page_size);

    reserved.release();
  }

  static void test_virtual_space_actual_committed_space() {
    test_virtual_space_actual_committed_space(4 * K, 0);
    test_virtual_space_actual_committed_space(4 * K, 4 * K);
    test_virtual_space_actual_committed_space(8 * K, 0);
    test_virtual_space_actual_committed_space(8 * K, 4 * K);
    test_virtual_space_actual_committed_space(8 * K, 8 * K);
    test_virtual_space_actual_committed_space(12 * K, 0);
    test_virtual_space_actual_committed_space(12 * K, 4 * K);
    test_virtual_space_actual_committed_space(12 * K, 8 * K);
    test_virtual_space_actual_committed_space(12 * K, 12 * K);
    test_virtual_space_actual_committed_space(64 * K, 0);
    test_virtual_space_actual_committed_space(64 * K, 32 * K);
    test_virtual_space_actual_committed_space(64 * K, 64 * K);
    test_virtual_space_actual_committed_space(2 * M, 0);
    test_virtual_space_actual_committed_space(2 * M, 4 * K);
    test_virtual_space_actual_committed_space(2 * M, 64 * K);
    test_virtual_space_actual_committed_space(2 * M, 1 * M);
    test_virtual_space_actual_committed_space(2 * M, 2 * M);
    test_virtual_space_actual_committed_space(10 * M, 0);
    test_virtual_space_actual_committed_space(10 * M, 4 * K);
    test_virtual_space_actual_committed_space(10 * M, 8 * K);
    test_virtual_space_actual_committed_space(10 * M, 1 * M);
    test_virtual_space_actual_committed_space(10 * M, 2 * M);
    test_virtual_space_actual_committed_space(10 * M, 5 * M);
    test_virtual_space_actual_committed_space(10 * M, 10 * M);
  }

  static void test_virtual_space_disable_large_pages() {
    if (!UseLargePages) {
      return;
    }
    // These test cases verify committed-size tracking when we force
    // VirtualSpace to disable large pages, and under the other explicit
    // granularity modes.
    test_virtual_space_actual_committed_space(10 * M, 0, Disable);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);

    test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);

    test_virtual_space_actual_committed_space(10 * M, 0, Commit);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
  }

  static void test_virtual_space() {
    test_virtual_space_actual_committed_space();
    test_virtual_space_actual_committed_space_one_large_page();
    test_virtual_space_disable_large_pages();
  }
};

void TestVirtualSpace_test() {
  TestVirtualSpace::test_virtual_space();
}

#endif // PRODUCT

#endif // PRODUCT