/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif


// ReservedSpace

// Dummy constructor
ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
    _alignment(0), _special(false), _executable(false) {
}

ReservedSpace::ReservedSpace(size_t size) {
  size_t page_size = os::page_size_for_region(size, size, 1);
  bool large_pages = page_size != (size_t)os::vm_page_size();
  // Don't force the alignment to be large page aligned,
  // since that will waste memory.
  size_t alignment = os::vm_allocation_granularity();
  initialize(size, alignment, large_pages, NULL, 0, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             char* requested_address,
                             const size_t noaccess_prefix) {
  initialize(size+noaccess_prefix, alignment, large, requested_address,
             noaccess_prefix, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             bool executable) {
  initialize(size, alignment, large, NULL, 0, executable);
}

// Helper method.
static bool failed_to_reserve_as_requested(char* base, char* requested_address,
                                           const size_t size, bool special)
{
  if (base == requested_address || requested_address == NULL)
    return false; // did not fail

  if (base != NULL) {
    // A different reserve address may be acceptable in other cases,
    // but for compressed oops the heap should be at the requested address.
    assert(UseCompressedOops, "currently requested address used only for compressed oops");
    if (PrintCompressedOopsMode) {
      tty->cr();
      tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
    }
    // OS ignored requested address. Try different address.
    if (special) {
      if (!os::release_memory_special(base, size)) {
        fatal("os::release_memory_special failed");
      }
    } else {
      if (!os::release_memory(base, size)) {
        fatal("os::release_memory failed");
      }
    }
  }
  return true;
}

void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               const size_t noaccess_prefix,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  alignment = MAX2(alignment, (size_t)os::vm_page_size());

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == alignment, "noaccess prefix wrong");

  _base = NULL;
  _size = 0;
  _special = false;
  _executable = executable;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }
  // If the OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  if (requested_address != 0) {
    requested_address -= noaccess_prefix; // adjust requested address
    assert(requested_address != NULL, "huge noaccess prefix?");
  }
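
  // A hedged worked example with assumed numbers (not taken from this code):
  // if the caller requests the heap at 0x00007f0000200000 with a 2M
  // noaccess_prefix, the reservation is attempted at 0x00007f0000000000, so
  // that once the prefix is protected the usable heap base lands back on the
  // originally requested address.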

  if (special) {

    base = os::reserve_memory_special(size, alignment, requested_address, executable);

    if (base != NULL) {
      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
        // OS ignored requested address. Try different address.
        return;
      }
      // Check alignment constraints.
      assert((uintptr_t) base % alignment == 0,
             err_msg("Large pages returned a non-aligned address, base: "
                 PTR_FORMAT " alignment: " PTR_FORMAT,
                 base, (void*)(uintptr_t)alignment));
      _special = true;
    } else {
      // failed; try to reserve regular memory below
      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
        if (PrintCompressedOopsMode) {
          tty->cr();
          tty->print_cr("Reserve regular memory without large pages.");
        }
      }
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid mapping over something
    // important.  If the requested range is unavailable, NULL is returned.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // OS ignored requested address. Try different address.
        base = NULL;
      }
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints
    if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
      // Base not aligned, retry
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      // Make sure that size is aligned
      size = align_size_up(size, alignment);
      base = os::reserve_memory_aligned(size, alignment);

      if (requested_address != 0 &&
          failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address. Return to the caller, who can
        // take remedial action (e.g., retry without a requested address).
        assert(_base == NULL, "should be");
        return;
      }
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = noaccess_prefix;

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == _alignment, "noaccess prefix wrong");

  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");
}


ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                             bool special, bool executable) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = 0;
  _special = special;
  _executable = executable;
}


ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
                                        bool split, bool realloc) {
  assert(partition_size <= size(), "partition failed");
  if (split) {
    os::split_reserved_memory(base(), size(), partition_size, realloc);
  }
  ReservedSpace result(base(), partition_size, alignment, special(),
                       executable());
  return result;
}


ReservedSpace
ReservedSpace::last_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base() + partition_size, size() - partition_size,
                       alignment, special(), executable());
  return result;
}


size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_size_up(size, os::vm_page_size());
}


size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_size_down(size, os::vm_page_size());
}


size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_size_up(size, os::vm_allocation_granularity());
}


size_t ReservedSpace::allocation_align_size_down(size_t size) {
  return align_size_down(size, os::vm_allocation_granularity());
}


void ReservedSpace::release() {
  if (is_reserved()) {
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
    if (special()) {
      os::release_memory_special(real_base, real_size);
    } else {
      os::release_memory(real_base, real_size);
    }
    _base = NULL;
    _size = 0;
    _noaccess_prefix = 0;
    _special = false;
    _executable = false;
  }
}

void ReservedSpace::protect_noaccess_prefix(const size_t size) {
  assert((_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
                                     (Universe::narrow_oop_base() != NULL) &&
                                     Universe::narrow_oop_use_implicit_null_checks()),
         "noaccess_prefix should be used only with non zero based compressed oops");

  // If there is no noaccess prefix, return.
  if (_noaccess_prefix == 0) return;

  assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
         "must be at least page size big");

  // Protect memory at the base of the allocated region.
  // If special, the page was already committed (this only matters on Windows).
  if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
                          _special)) {
    fatal("cannot protect protection page");
  }
  if (PrintCompressedOopsMode) {
    tty->cr();
    tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
  }

  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
         "must be exactly of required size and alignment");
}
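
// A minimal sketch with assumed numbers (not derived from this code): with a
// 2M noaccess prefix and a reservation at [0x00007f0000000000,
// 0x00007f0000000000 + 2M + heap_size), the first 2M become inaccessible and
// the fields shift to _base = 0x00007f0000200000 and _size = heap_size.
// A compressed-oop decode of null then faults in the protected prefix
// instead of touching live data, which is what enables implicit null checks.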

ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
                                     bool large, char* requested_address) :
  ReservedSpace(size, alignment, large,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), alignment) : 0) {
  if (base() > 0) {
    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
  }

  // Only the reserved space for the Java heap should have a noaccess_prefix
  // if using compressed oops.
  protect_noaccess_prefix(size);
}

// Reserve space for the code segment.  Same as the Java heap, except that we
// mark this space as executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     bool large) :
  ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
  MemTracker::record_virtual_memory_type((address)base(), mtCode);
}

// VirtualSpace

VirtualSpace::VirtualSpace() {
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  if (!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are the code cache
  // and the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not already aligned.
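  //
  // An illustrative layout, assuming 4K small pages and 2M large pages
  // (the numbers are hypothetical, not derived from this code):
  //
  //   low_boundary                                          high_boundary
  //   |-- lower --|------------- middle -------------|-- upper --|
  //               ^ lower_high_boundary               ^ middle_high_boundary
  //                 (first 2M-aligned address)          (last 2M-aligned address)
  //
  // Only the 2M-aligned middle region can be backed by large pages; the
  // lower and upper fringes are committed with small 4K pages.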
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = os::page_size_for_region(rs.size(), rs.size(), 1);
  _upper_alignment  = os::vm_page_size();

  // End of each region
  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // commit to initial size
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}


VirtualSpace::~VirtualSpace() {
  release();
}


void VirtualSpace::release() {
  // A VirtualSpace does not reserve its underlying memory, so it must not
  // release it here; the caller releases it via rs.release().
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}


size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}


size_t VirtualSpace::uncommitted_size() const {
  return reserved_size() - committed_size();
}

size_t VirtualSpace::actual_committed_size() const {
  // Special VirtualSpaces commit all reserved space up front.
  if (special()) {
    return reserved_size();
  }

  size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
  size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
  size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));

#ifdef ASSERT
  size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
  size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
  size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));

  if (committed_high > 0) {
    assert(committed_low == lower, "Must be");
    assert(committed_middle == middle, "Must be");
  }

  if (committed_middle > 0) {
    assert(committed_low == lower, "Must be");
  }
  if (committed_middle < middle) {
    assert(committed_high == 0, "Must be");
  }

  if (committed_low < lower) {
    assert(committed_high == 0, "Must be");
    assert(committed_middle == 0, "Must be");
  }
#endif

  return committed_low + committed_middle + committed_high;
}


bool VirtualSpace::contains(const void* p) const {
  return low() <= (const char*) p && (const char*) p < high();
}

/*
   First we need to determine if a particular virtual space is using large
   pages.  This is done at the initialize function and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages.  Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks.  If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading up to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
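
// A hedged worked example (assumed 4K small pages and 2M large pages; the
// numbers are illustrative only): for a 4M space whose base sits 1M past a
// 2M boundary, the lower region is the first 1M, the middle region the 2M
// large page that follows, and the upper region the final 1M. An
// expand_by(3M) on an empty space then commits the 1M lower region with
// small pages and the whole 2M middle region as one large page; a later
// expand_by(1M) commits the upper 1M with small pages again.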
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) return false;

  if (special()) {
    // don't commit memory if the entire space is pinned in memory
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(),
         "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =
    MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high =
    MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =
    MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on the region's alignment.  Lower and upper
  // alignment will always be the default page size.  Middle alignment will be
  // LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By getting the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra or inter region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs =
      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs =
      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs =
      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions
  if (lower_needs > 0) {
    assert(low_boundary() <= lower_high() &&
           lower_high() + lower_needs <= lower_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", lower_needs=" SIZE_FORMAT ", %d) failed",
                         lower_high(), lower_needs, _executable);)
      return false;
    } else {
      _lower_high += lower_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= middle_high() &&
           middle_high() + middle_needs <= middle_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
                           _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", middle_needs=" SIZE_FORMAT ", " SIZE_FORMAT
                         ", %d) failed", middle_high(), middle_needs,
                         middle_alignment(), _executable);)
      return false;
    }
    _middle_high += middle_needs;
  }
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= upper_high() &&
           upper_high() + upper_needs <= upper_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", upper_needs=" SIZE_FORMAT ", %d) failed",
                         upper_high(), upper_needs, _executable);)
      return false;
    } else {
      _upper_high += upper_needs;
    }
  }

  if (pre_touch || AlwaysPreTouch) {
    int vm_ps = os::vm_page_size();
    for (char* curr = previous_high;
         curr < unaligned_new_high;
         curr += vm_ps) {
      // Note the use of a write here; originally we tried just a read, but
      // since the value read was unused, the optimizer removed the read.
      // If we ever have a concurrent touchahead thread, we'll want to use
      // a read, to avoid the potential of overwriting data (if a mutator
      // thread beats the touchahead thread to a page).  There are various
      // ways of making sure this read is not optimized away: for example,
      // generating the code for a read procedure at runtime.
      *curr = 0;
    }
  }

  _high += bytes;
  return true;
}

// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary,
// at which point that particular page can be uncommitted.
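//
// A hedged worked example (assumed 4K pages; the numbers are illustrative):
// if high() sits 12K above low_boundary() and shrink_by(6K) is called, the
// unaligned new high lands at 6K; rounding up to the 8K page boundary means
// only the fully vacated [8K, 12K) page is uncommitted, while _high still
// drops the full 6K, leaving the partially used [4K, 8K) page committed.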
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // don't uncommit if the entire space is pinned in memory
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align address to region's alignment
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}

#ifndef PRODUCT
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}

void VirtualSpace::print_on(outputStream* out) {
  out->print   ("Virtual space:");
  if (special()) out->print(" (pinned in memory)");
  out->cr();
  out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
  out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
  out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
}

void VirtualSpace::print() {
  print_on(tty);
}

/////////////// Unit tests ///////////////

#ifndef PRODUCT

#define test_log(...) \
  do {\
    if (VerboseInternalVMTests) { \
      tty->print_cr(__VA_ARGS__); \
      tty->flush(); \
    }\
  } while (false)

class TestReservedSpace : AllStatic {
 public:
  static void small_page_write(void* addr, size_t size) {
    size_t page_size = os::vm_page_size();

    char* end = (char*)addr + size;
    for (char* p = (char*)addr; p < end; p += page_size) {
      *p = 1;
    }
  }

  static void release_memory_for_test(ReservedSpace rs) {
    if (rs.special()) {
      guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
    } else {
      guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
    }
  }

  static void test_reserved_space1(size_t size, size_t alignment) {
    test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);

    assert(is_size_aligned(size, alignment), "Incorrect input parameters");

    ReservedSpace rs(size,          // size
                     alignment,     // alignment
                     UseLargePages, // large
                     NULL,          // requested_address
                     0);            // noaccess_prefix

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
    assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }

  static void test_reserved_space2(size_t size) {
    test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);

    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");

    ReservedSpace rs(size);

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }

  static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
    test_log("test_reserved_space3(%p, %p, %d)",
        (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);

    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
    assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");

    bool large = maybe_large && UseLargePages && size >= os::large_page_size();

    ReservedSpace rs(size, alignment, large, false);

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }


  static void test_reserved_space1() {
    size_t size = 2 * 1024 * 1024;
    size_t ag   = os::vm_allocation_granularity();

    test_reserved_space1(size,      ag);
    test_reserved_space1(size * 2,  ag);
    test_reserved_space1(size * 10, ag);
  }

  static void test_reserved_space2() {
    size_t size = 2 * 1024 * 1024;
    size_t ag = os::vm_allocation_granularity();

    test_reserved_space2(size * 1);
    test_reserved_space2(size * 2);
    test_reserved_space2(size * 10);
    test_reserved_space2(ag);
    test_reserved_space2(size - ag);
    test_reserved_space2(size);
    test_reserved_space2(size + ag);
    test_reserved_space2(size * 2);
    test_reserved_space2(size * 2 - ag);
    test_reserved_space2(size * 2 + ag);
    test_reserved_space2(size * 3);
    test_reserved_space2(size * 3 - ag);
    test_reserved_space2(size * 3 + ag);
    test_reserved_space2(size * 10);
    test_reserved_space2(size * 10 + size / 2);
  }

  static void test_reserved_space3() {
    size_t ag = os::vm_allocation_granularity();

    test_reserved_space3(ag,      ag    , false);
    test_reserved_space3(ag * 2,  ag    , false);
    test_reserved_space3(ag * 3,  ag    , false);
    test_reserved_space3(ag * 2,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 2, false);
    test_reserved_space3(ag * 8,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 4, false);
    test_reserved_space3(ag * 8,  ag * 4, false);
    test_reserved_space3(ag * 16, ag * 4, false);

    if (UseLargePages) {
      size_t lp = os::large_page_size();

      // Without large pages
      test_reserved_space3(lp,     ag * 4, false);
      test_reserved_space3(lp * 2, ag * 4, false);
      test_reserved_space3(lp * 4, ag * 4, false);
      test_reserved_space3(lp,     lp    , false);
      test_reserved_space3(lp * 2, lp    , false);
      test_reserved_space3(lp * 3, lp    , false);
      test_reserved_space3(lp * 2, lp * 2, false);
      test_reserved_space3(lp * 4, lp * 2, false);
      test_reserved_space3(lp * 8, lp * 2, false);

      // With large pages
      test_reserved_space3(lp,     ag * 4, true);
      test_reserved_space3(lp * 2, ag * 4, true);
      test_reserved_space3(lp * 4, ag * 4, true);
      test_reserved_space3(lp,     lp    , true);
      test_reserved_space3(lp * 2, lp    , true);
      test_reserved_space3(lp * 3, lp    , true);
      test_reserved_space3(lp * 2, lp * 2, true);
      test_reserved_space3(lp * 4, lp * 2, true);
      test_reserved_space3(lp * 8, lp * 2, true);
    }
  }

  static void test_reserved_space() {
    test_reserved_space1();
    test_reserved_space2();
    test_reserved_space3();
  }
};

void TestReservedSpace_test() {
  TestReservedSpace::test_reserved_space();
}

#define assert_equals(actual, expected)     \
  assert(actual == expected,                \
    err_msg("Got " SIZE_FORMAT " expected " \
      SIZE_FORMAT, actual, expected));

#define assert_ge(value1, value2)                  \
  assert(value1 >= value2,                         \
    err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
      #value2 "': " SIZE_FORMAT, value1, value2));

#define assert_lt(value1, value2)                  \
  assert(value1 < value2,                          \
    err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
      #value2 "': " SIZE_FORMAT, value1, value2));


class TestVirtualSpace : AllStatic {
 public:
  static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size) {
    size_t granularity = os::vm_allocation_granularity();
    size_t reserve_size_aligned = align_size_up(reserve_size, granularity);

    ReservedSpace reserved(reserve_size_aligned);

    assert(reserved.is_reserved(), "Must be");

    VirtualSpace vs;
    bool initialized = vs.initialize(reserved, 0);
    assert(initialized, "Failed to initialize VirtualSpace");

    vs.expand_by(commit_size, false);

    if (vs.special()) {
      assert_equals(vs.actual_committed_size(), reserve_size_aligned);
    } else {
      assert_ge(vs.actual_committed_size(), commit_size);
      // Approximate the commit granularity.
      size_t commit_granularity = UseLargePages ? os::large_page_size() : os::vm_page_size();
      assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
    }

    reserved.release();
  }

  static void test_virtual_space_actual_committed_space_one_large_page() {
    if (!UseLargePages) {
      return;
    }

    size_t large_page_size = os::large_page_size();

    ReservedSpace reserved(large_page_size, large_page_size, true, false);

    assert(reserved.is_reserved(), "Must be");

    VirtualSpace vs;
    bool initialized = vs.initialize(reserved, 0);
    assert(initialized, "Failed to initialize VirtualSpace");

    vs.expand_by(large_page_size, false);

    assert_equals(vs.actual_committed_size(), large_page_size);

    reserved.release();
  }

  static void test_virtual_space_actual_committed_space() {
    test_virtual_space_actual_committed_space(4 * K, 0);
    test_virtual_space_actual_committed_space(4 * K, 4 * K);
    test_virtual_space_actual_committed_space(8 * K, 0);
    test_virtual_space_actual_committed_space(8 * K, 4 * K);
    test_virtual_space_actual_committed_space(8 * K, 8 * K);
    test_virtual_space_actual_committed_space(12 * K, 0);
    test_virtual_space_actual_committed_space(12 * K, 4 * K);
    test_virtual_space_actual_committed_space(12 * K, 8 * K);
    test_virtual_space_actual_committed_space(12 * K, 12 * K);
    test_virtual_space_actual_committed_space(64 * K, 0);
    test_virtual_space_actual_committed_space(64 * K, 32 * K);
    test_virtual_space_actual_committed_space(64 * K, 64 * K);
    test_virtual_space_actual_committed_space(2 * M, 0);
    test_virtual_space_actual_committed_space(2 * M, 4 * K);
    test_virtual_space_actual_committed_space(2 * M, 64 * K);
    test_virtual_space_actual_committed_space(2 * M, 1 * M);
    test_virtual_space_actual_committed_space(2 * M, 2 * M);
    test_virtual_space_actual_committed_space(10 * M, 0);
    test_virtual_space_actual_committed_space(10 * M, 4 * K);
    test_virtual_space_actual_committed_space(10 * M, 8 * K);
    test_virtual_space_actual_committed_space(10 * M, 1 * M);
    test_virtual_space_actual_committed_space(10 * M, 2 * M);
    test_virtual_space_actual_committed_space(10 * M, 5 * M);
    test_virtual_space_actual_committed_space(10 * M, 10 * M);
  }

  static void test_virtual_space() {
    test_virtual_space_actual_committed_space();
    test_virtual_space_actual_committed_space_one_large_page();
  }
};

void TestVirtualSpace_test() {
  TestVirtualSpace::test_virtual_space();
}

#endif // PRODUCT

#endif