1 /*
   2  * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "oops/markOop.hpp"
  27 #include "oops/oop.inline.hpp"
  28 #include "runtime/virtualspace.hpp"
  29 #include "services/memTracker.hpp"
  30 
  31 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
  32 
  33 // ReservedSpace
  34 
  35 // Dummy constructor
  36 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
  37     _alignment(0), _special(false), _executable(false) {
  38 }
  39 
  40 ReservedSpace::ReservedSpace(size_t size) {
  41   size_t page_size = os::page_size_for_region(size, 1);
  42   bool large_pages = page_size != (size_t)os::vm_page_size();
  43   // Don't force the alignment to be large page aligned,
  44   // since that will waste memory.
  45   size_t alignment = os::vm_allocation_granularity();
  46   initialize(size, alignment, large_pages, NULL, 0, false);
  47 }
  48 
  49 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  50                              bool large,
  51                              char* requested_address,
  52                              const size_t noaccess_prefix) {
  53   initialize(size+noaccess_prefix, alignment, large, requested_address,
  54              noaccess_prefix, false);
  55 }
  56 
  57 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  58                              bool large,
  59                              bool executable) {
  60   initialize(size, alignment, large, NULL, 0, executable);
  61 }
  62 
  63 // Helper method.
  64 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
  65                                            const size_t size, bool special)
  66 {
  67   if (base == requested_address || requested_address == NULL)
  68     return false; // did not fail
  69 
  70   if (base != NULL) {
  71     // Different reserve address may be acceptable in other cases
  72     // but for compressed oops heap should be at requested address.
  73     assert(UseCompressedOops, "currently requested address used only for compressed oops");
  74     if (PrintCompressedOopsMode) {
  75       tty->cr();
  76       tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
  77     }
  78     // OS ignored requested address. Try different address.
  79     if (special) {
  80       if (!os::release_memory_special(base, size)) {
  81         fatal("os::release_memory_special failed");
  82       }
  83     } else {
  84       if (!os::release_memory(base, size)) {
  85         fatal("os::release_memory failed");
  86       }
  87     }
  88   }
  89   return true;
  90 }
  91 
// Reserve 'size' bytes of virtual address space, optionally at
// 'requested_address' (which is adjusted down by 'noaccess_prefix'),
// using pinned large pages when 'large' is set and the OS cannot commit
// large-page memory on demand.  On any failure the object is left with
// _base == NULL so is_reserved() reports false.
void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               const size_t noaccess_prefix,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  // Alignment is at least one (small) page.
  alignment = MAX2(alignment, (size_t)os::vm_page_size());

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == alignment, "noaccess prefix wrong");

  // Reset all fields up front so that any early return below leaves the
  // object in the "not reserved" state.
  _base = NULL;
  _size = 0;
  _special = false;
  _executable = executable;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }

  // If OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  if (requested_address != 0) {
    // The caller wants the *usable* area at requested_address; the prefix
    // sits immediately below it, so the actual reservation starts lower.
    requested_address -= noaccess_prefix; // adjust requested address
    assert(requested_address != NULL, "huge noaccess prefix?");
  }

  if (special) {

    base = os::reserve_memory_special(size, alignment, requested_address, executable);

    if (base != NULL) {
      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
        // OS ignored requested address. Try different address.
        return;
      }
      // Check alignment constraints.
      assert((uintptr_t) base % alignment == 0,
             err_msg("Large pages returned a non-aligned address, base: "
                 PTR_FORMAT " alignment: " PTR_FORMAT,
                 base, (void*)(uintptr_t)alignment));
      _special = true;
    } else {
      // failed; try to reserve regular memory below
      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
        if (PrintCompressedOopsMode) {
          tty->cr();
          tty->print_cr("Reserve regular memory without large pages.");
        }
      }
    }
  }

  // Regular (non-pinned) reservation path; also the fallback when the
  // special reservation above failed.
  if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid over mapping something
    // important.  If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // OS ignored requested address. Try different address.
        base = NULL;
      }
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints
    if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
      // Base not aligned, retry
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      // Make sure that size is aligned
      size = align_size_up(size, alignment);
      base = os::reserve_memory_aligned(size, alignment);

      if (requested_address != 0 &&
          failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address. Return back to the caller who can
        // take remedial action (like try again without a requested address).
        assert(_base == NULL, "should be");
        return;
      }
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = noaccess_prefix;

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == _alignment, "noaccess prefix wrong");

  // The reserved range must be usable by mark-sweep, which encodes pointers
  // into mark words; verify both ends survive the round trip.
  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");
}
 211 
 212 
 213 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
 214                              bool special, bool executable) {
 215   assert((size % os::vm_allocation_granularity()) == 0,
 216          "size not allocation aligned");
 217   _base = base;
 218   _size = size;
 219   _alignment = alignment;
 220   _noaccess_prefix = 0;
 221   _special = special;
 222   _executable = executable;
 223 }
 224 
 225 
 226 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
 227                                         bool split, bool realloc) {
 228   assert(partition_size <= size(), "partition failed");
 229   if (split) {
 230     os::split_reserved_memory(base(), size(), partition_size, realloc);
 231   }
 232   ReservedSpace result(base(), partition_size, alignment, special(),
 233                        executable());
 234   return result;
 235 }
 236 
 237 
 238 ReservedSpace
 239 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
 240   assert(partition_size <= size(), "partition failed");
 241   ReservedSpace result(base() + partition_size, size() - partition_size,
 242                        alignment, special(), executable());
 243   return result;
 244 }
 245 
 246 
 247 size_t ReservedSpace::page_align_size_up(size_t size) {
 248   return align_size_up(size, os::vm_page_size());
 249 }
 250 
 251 
 252 size_t ReservedSpace::page_align_size_down(size_t size) {
 253   return align_size_down(size, os::vm_page_size());
 254 }
 255 
 256 
 257 size_t ReservedSpace::allocation_align_size_up(size_t size) {
 258   return align_size_up(size, os::vm_allocation_granularity());
 259 }
 260 
 261 
 262 size_t ReservedSpace::allocation_align_size_down(size_t size) {
 263   return align_size_down(size, os::vm_allocation_granularity());
 264 }
 265 
 266 
 267 void ReservedSpace::release() {
 268   if (is_reserved()) {
 269     char *real_base = _base - _noaccess_prefix;
 270     const size_t real_size = _size + _noaccess_prefix;
 271     if (special()) {
 272       os::release_memory_special(real_base, real_size);
 273     } else{
 274       os::release_memory(real_base, real_size);
 275     }
 276     _base = NULL;
 277     _size = 0;
 278     _noaccess_prefix = 0;
 279     _special = false;
 280     _executable = false;
 281   }
 282 }
 283 
// Make the first _noaccess_prefix bytes of the reservation inaccessible,
// then shift _base/_size so clients see only the usable part.  'size' is
// the caller's original request and is used to verify the final extent.
// The prefix exists only for non-zero-based compressed oops using implicit
// null checks (see the assert below).
void ReservedSpace::protect_noaccess_prefix(const size_t size) {
  assert( (_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
                                      (Universe::narrow_oop_base() != NULL) &&
                                      Universe::narrow_oop_use_implicit_null_checks()),
         "noaccess_prefix should be used only with non zero based compressed oops");

  // If there is no noaccess prefix, return.
  if (_noaccess_prefix == 0) return;

  assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
         "must be at least page size big");

  // Protect memory at the base of the allocated region.
  // If special, the page was committed (only matters on windows)
  if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
                          _special)) {
    fatal("cannot protect protection page");
  }
  if (PrintCompressedOopsMode) {
    tty->cr();
    tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
  }

  // Hide the prefix: from now on _base/_size describe the usable area only.
  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
         "must be exactly of required size and alignment");
}
 312 
 313 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
 314                                      bool large, char* requested_address) :
 315   ReservedSpace(size, alignment, large,
 316                 requested_address,
 317                 (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
 318                  Universe::narrow_oop_use_implicit_null_checks()) ?
 319                   lcm(os::vm_page_size(), alignment) : 0) {
 320   if (base() > 0) {
 321     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
 322   }
 323 
 324   // Only reserved space for the java heap should have a noaccess_prefix
 325   // if using compressed oops.
 326   protect_noaccess_prefix(size);
 327 }
 328 
 329 // Reserve space for code segment.  Same as Java heap only we mark this as
 330 // executable.
// Reserve 'r_size' bytes for the code cache, marked executable, and record
// the region with the memory tracker.
// NOTE(review): unlike ReservedHeapSpace, there is no NULL check on base()
// before recording — confirm MemTracker tolerates a failed reservation.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     bool large) :
  ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
  MemTracker::record_virtual_memory_type((address)base(), mtCode);
}
 337 
 338 // VirtualSpace
 339 
 340 VirtualSpace::VirtualSpace() {
 341   _low_boundary           = NULL;
 342   _high_boundary          = NULL;
 343   _low                    = NULL;
 344   _high                   = NULL;
 345   _lower_high             = NULL;
 346   _middle_high            = NULL;
 347   _upper_high             = NULL;
 348   _lower_high_boundary    = NULL;
 349   _middle_high_boundary   = NULL;
 350   _upper_high_boundary    = NULL;
 351   _lower_alignment        = 0;
 352   _middle_alignment       = 0;
 353   _upper_alignment        = 0;
 354   _special                = false;
 355   _executable             = false;
 356 }
 357 
 358 
 359 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
 360   const size_t max_commit_granularity = os::page_size_for_region(rs.size(), 1);
 361   return initialize_with_granularity(rs, committed_size, max_commit_granularity);
 362 }
 363 
// Set up this VirtualSpace over the reserved range 'rs', carving it into
// lower/middle/upper regions (the middle aligned to
// 'max_commit_granularity', the ends to small pages), and commit the first
// 'committed_size' bytes.  Returns false if 'rs' is not reserved or the
// initial commit fails.
bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
  if(!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  assert(max_commit_granularity > 0, "Granularity must be non-zero.");

  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  // Nothing committed yet: the committed top equals the bottom.
  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically,  we see that with a 4MB
  // page size, the only spaces that get handled this way are codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = max_commit_granularity;
  _upper_alignment  = os::vm_page_size();

  // End of each region
  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // commit to initial size
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}
 410 
 411 
// Clears this object's bookkeeping only; the underlying memory is owned by
// the ReservedSpace and must be released by its owner (see release()).
VirtualSpace::~VirtualSpace() {
  release();
}
 415 
 416 
 417 void VirtualSpace::release() {
 418   // This does not release memory it never reserved.
 419   // Caller must release via rs.release();
 420   _low_boundary           = NULL;
 421   _high_boundary          = NULL;
 422   _low                    = NULL;
 423   _high                   = NULL;
 424   _lower_high             = NULL;
 425   _middle_high            = NULL;
 426   _upper_high             = NULL;
 427   _lower_high_boundary    = NULL;
 428   _middle_high_boundary   = NULL;
 429   _upper_high_boundary    = NULL;
 430   _lower_alignment        = 0;
 431   _middle_alignment       = 0;
 432   _upper_alignment        = 0;
 433   _special                = false;
 434   _executable             = false;
 435 }
 436 
 437 
 438 size_t VirtualSpace::committed_size() const {
 439   return pointer_delta(high(), low(), sizeof(char));
 440 }
 441 
 442 
 443 size_t VirtualSpace::reserved_size() const {
 444   return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
 445 }
 446 
 447 
 448 size_t VirtualSpace::uncommitted_size()  const {
 449   return reserved_size() - committed_size();
 450 }
 451 
// Sum of the bytes actually committed in the lower, middle and upper
// regions.  Special spaces are fully committed at reservation time, so the
// entire reserved size counts.
size_t VirtualSpace::actual_committed_size() const {
  // Special VirtualSpaces commit all reserved space up front.
  if (special()) {
    return reserved_size();
  }

  size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
  size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
  size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));

#ifdef ASSERT
  // Regions fill bottom-up: any committed byte in a higher region implies
  // every lower region is completely committed, and a partially committed
  // lower region implies nothing is committed above it.
  size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
  size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
  size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));

  if (committed_high > 0) {
    assert(committed_low == lower, "Must be");
    assert(committed_middle == middle, "Must be");
  }

  if (committed_middle > 0) {
    assert(committed_low == lower, "Must be");
  }
  if (committed_middle < middle) {
    assert(committed_high == 0, "Must be");
  }

  if (committed_low < lower) {
    assert(committed_high == 0, "Must be");
    assert(committed_middle == 0, "Must be");
  }
#endif

  return committed_low + committed_middle + committed_high;
}
 487 
 488 
 489 bool VirtualSpace::contains(const void* p) const {
 490   return low() <= (const char*) p && (const char*) p < high();
 491 }
 492 
 493 /*
 494    First we need to determine if a particular virtual space is using large
 495    pages.  This is done at the initialize function and only virtual spaces
 496    that are larger than LargePageSizeInBytes use large pages.  Once we
 497    have determined this, all expand_by and shrink_by calls must grow and
 498    shrink by large page size chunks.  If a particular request
 499    is within the current large page, the call to commit and uncommit memory
 500    can be ignored.  In the case that the low and high boundaries of this
 501    space is not large page aligned, the pages leading to the first large
 502    page address and the pages after the last large page address must be
 503    allocated with default pages.
 504 */
// Commit an additional 'bytes' bytes at the top of the committed area,
// growing the lower, middle and upper regions as needed (each with its own
// commit alignment).  Returns false if the request exceeds the uncommitted
// remainder or any commit fails.  'pre_touch' (or AlwaysPreTouch) touches
// the newly committed range.
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) return false;

  if (special()) {
    // don't commit memory if the entire space is pinned in memory
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(),
         "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =
    MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high =
    MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =
    MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on the regions alignment.  lower and upper
  // alignment will always be default page size.  middle alignment will be
  // LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By getting the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra or inter region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs =
      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs =
      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs =
      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions bottom-up; on any failure, bail out (already-committed
  // portions are left committed but _high is not advanced).
  if (lower_needs > 0) {
    assert(low_boundary() <= lower_high() &&
           lower_high() + lower_needs <= lower_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", lower_needs=" SIZE_FORMAT ", %d) failed",
                         lower_high(), lower_needs, _executable);)
      return false;
    } else {
      _lower_high += lower_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= middle_high() &&
           middle_high() + middle_needs <= middle_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
                           _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", middle_needs=" SIZE_FORMAT ", " SIZE_FORMAT
                         ", %d) failed", middle_high(), middle_needs,
                         middle_alignment(), _executable);)
      return false;
    }
    _middle_high += middle_needs;
  }
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= upper_high() &&
           upper_high() + upper_needs <= upper_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", upper_needs=" SIZE_FORMAT ", %d) failed",
                         upper_high(), upper_needs, _executable);)
      return false;
    } else {
      _upper_high += upper_needs;
    }
  }

  if (pre_touch || AlwaysPreTouch) {
    os::pretouch_memory(previous_high, unaligned_new_high);
  }

  // Everything committed; publish the new top of the committed area.
  _high += bytes;
  return true;
}
 624 
// A page is uncommitted if the contents of the entire page is deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary
// in which case that particular page can now be uncommitted.
// Uncommits 'size' bytes from the top of the committed area, shrinking the
// upper, middle and lower regions (in that order) as the new top crosses
// their boundaries.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // don't uncommit if the entire space is pinned in memory
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address: clamp the new top to each region's
  // lower bound so each region computes its own new high.
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align address to region's alignment (rounding up keeps partially used
  // pages committed).
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit top-down; on failure, return without adjusting _high.
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}
 722 
 723 #ifndef PRODUCT
// Debug-only sanity check: the three regions are contiguous, ordered
// bottom-to-top, and the committed top lies within them.
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}
 740 
// Print a human-readable summary of this space (committed/reserved sizes
// and the committed and reserved address ranges) to 'out'.
void VirtualSpace::print_on(outputStream* out) {
  out->print   ("Virtual space:");
  if (special()) out->print(" (pinned in memory)");
  out->cr();
  out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
  out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
  out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
}
 750 
// Convenience wrapper: print the summary to the default tty stream.
void VirtualSpace::print() {
  print_on(tty);
}
 754 
 755 /////////////// Unit tests ///////////////
 756 
 757 #ifndef PRODUCT
 758 
 759 #define test_log(...) \
 760   do {\
 761     if (VerboseInternalVMTests) { \
 762       tty->print_cr(__VA_ARGS__); \
 763       tty->flush(); \
 764     }\
 765   } while (false)
 766 
// Unit tests for ReservedSpace: reserve memory through the various
// constructors and verify base/size alignment, and that pinned
// (large-page/special) mappings are actually committed and writable.
class TestReservedSpace : AllStatic {
 public:
  // Touch one byte on every small page in [addr, addr + size) to verify
  // that the whole range is committed and writable.
  static void small_page_write(void* addr, size_t size) {
    size_t page_size = os::vm_page_size();

    char* end = (char*)addr + size;
    for (char* p = (char*)addr; p < end; p += page_size) {
      *p = 1;
    }
  }

  // Release a test reservation, using the special-memory API when the
  // space was allocated as pinned/large-page memory.
  static void release_memory_for_test(ReservedSpace rs) {
    if (rs.special()) {
      guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
    } else {
      guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
    }
  }

  // Reserve size bytes with an explicit alignment and check that both the
  // returned base address and the size honor that alignment.
  static void test_reserved_space1(size_t size, size_t alignment) {
    test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);

    assert(is_size_aligned(size, alignment), "Incorrect input parameters");

    ReservedSpace rs(size,          // size
                     alignment,     // alignment
                     UseLargePages, // large
                     NULL,          // requested_address
                     0);            // noaccess_prefix

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
    assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");

    if (rs.special()) {
      // Pinned memory is committed up front; make sure it is usable.
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }

  // Reserve size bytes through the simple one-argument constructor
  // (default alignment/page-size policy) and sanity-check the result.
  static void test_reserved_space2(size_t size) {
    test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);

    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");

    ReservedSpace rs(size);

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    if (rs.special()) {
      // Pinned memory is committed up front; make sure it is usable.
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }

  // Reserve size bytes, optionally requesting large pages (only honored
  // when UseLargePages is on and size is at least one large page).
  static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
    test_log("test_reserved_space3(%p, %p, %d)",
        (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);

    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
    assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");

    bool large = maybe_large && UseLargePages && size >= os::large_page_size();

    ReservedSpace rs(size, alignment, large, false);

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    if (rs.special()) {
      // Pinned memory is committed up front; make sure it is usable.
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }


  // Driver: a few sizes at allocation granularity alignment.
  static void test_reserved_space1() {
    size_t size = 2 * 1024 * 1024;
    size_t ag   = os::vm_allocation_granularity();

    test_reserved_space1(size,      ag);
    test_reserved_space1(size * 2,  ag);
    test_reserved_space1(size * 10, ag);
  }

  // Driver: sizes around multiples of 2M, offset by +/- one allocation
  // granularity, to probe rounding at the boundaries.
  static void test_reserved_space2() {
    size_t size = 2 * 1024 * 1024;
    size_t ag = os::vm_allocation_granularity();

    test_reserved_space2(size * 1);
    test_reserved_space2(size * 2);
    test_reserved_space2(size * 10);
    test_reserved_space2(ag);
    test_reserved_space2(size - ag);
    test_reserved_space2(size);
    test_reserved_space2(size + ag);
    test_reserved_space2(size * 2);
    test_reserved_space2(size * 2 - ag);
    test_reserved_space2(size * 2 + ag);
    test_reserved_space2(size * 3);
    test_reserved_space2(size * 3 - ag);
    test_reserved_space2(size * 3 + ag);
    test_reserved_space2(size * 10);
    test_reserved_space2(size * 10 + size / 2);
  }

  // Driver: various size/alignment combinations with and without
  // requesting large pages.
  static void test_reserved_space3() {
    size_t ag = os::vm_allocation_granularity();

    test_reserved_space3(ag,      ag    , false);
    test_reserved_space3(ag * 2,  ag    , false);
    test_reserved_space3(ag * 3,  ag    , false);
    test_reserved_space3(ag * 2,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 2, false);
    test_reserved_space3(ag * 8,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 4, false);
    test_reserved_space3(ag * 8,  ag * 4, false);
    test_reserved_space3(ag * 16, ag * 4, false);

    if (UseLargePages) {
      size_t lp = os::large_page_size();

      // Without large pages
      test_reserved_space3(lp,     ag * 4, false);
      test_reserved_space3(lp * 2, ag * 4, false);
      test_reserved_space3(lp * 4, ag * 4, false);
      test_reserved_space3(lp,     lp    , false);
      test_reserved_space3(lp * 2, lp    , false);
      test_reserved_space3(lp * 3, lp    , false);
      test_reserved_space3(lp * 2, lp * 2, false);
      test_reserved_space3(lp * 4, lp * 2, false);
      test_reserved_space3(lp * 8, lp * 2, false);

      // With large pages
      test_reserved_space3(lp, ag * 4    , true);
      test_reserved_space3(lp * 2, ag * 4, true);
      test_reserved_space3(lp * 4, ag * 4, true);
      test_reserved_space3(lp, lp        , true);
      test_reserved_space3(lp * 2, lp    , true);
      test_reserved_space3(lp * 3, lp    , true);
      test_reserved_space3(lp * 2, lp * 2, true);
      test_reserved_space3(lp * 4, lp * 2, true);
      test_reserved_space3(lp * 8, lp * 2, true);
    }
  }

  // Run all ReservedSpace tests.
  static void test_reserved_space() {
    test_reserved_space1();
    test_reserved_space2();
    test_reserved_space3();
  }
};
 931 
// External entry point: run all ReservedSpace unit tests.
void TestReservedSpace_test() {
  TestReservedSpace::test_reserved_space();
}
 935 
// Assert that two size_t values are equal, printing both on failure.
#define assert_equals(actual, expected)     \
  assert(actual == expected,                \
    err_msg("Got " SIZE_FORMAT " expected " \
      SIZE_FORMAT, actual, expected));

// Assert value1 >= value2, printing both expressions and their values
// on failure.
#define assert_ge(value1, value2)                  \
  assert(value1 >= value2,                         \
    err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
      #value2 "': " SIZE_FORMAT, value1, value2));

// Assert value1 < value2, printing both expressions and their values
// on failure.
#define assert_lt(value1, value2)                  \
  assert(value1 < value2,                          \
    err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
      #value2 "': " SIZE_FORMAT, value1, value2));
 950 
 951 
 952 class TestVirtualSpace : AllStatic {
 953   enum TestLargePages {
 954     Default,
 955     Disable,
 956     Reserve,
 957     Commit
 958   };
 959 
 960   static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
 961     switch(mode) {
 962     default:
 963     case Default:
 964     case Reserve:
 965       return ReservedSpace(reserve_size_aligned);
 966     case Disable:
 967     case Commit:
 968       return ReservedSpace(reserve_size_aligned,
 969                            os::vm_allocation_granularity(),
 970                            /* large */ false, /* exec */ false);
 971     }
 972   }
 973 
 974   static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
 975     switch(mode) {
 976     default:
 977     case Default:
 978     case Reserve:
 979       return vs.initialize(rs, 0);
 980     case Disable:
 981       return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
 982     case Commit:
 983       return vs.initialize_with_granularity(rs, 0, os::page_size_for_region(rs.size(), 1));
 984     }
 985   }
 986 
 987  public:
 988   static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
 989                                                         TestLargePages mode = Default) {
 990     size_t granularity = os::vm_allocation_granularity();
 991     size_t reserve_size_aligned = align_size_up(reserve_size, granularity);
 992 
 993     ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
 994 
 995     assert(reserved.is_reserved(), "Must be");
 996 
 997     VirtualSpace vs;
 998     bool initialized = initialize_virtual_space(vs, reserved, mode);
 999     assert(initialized, "Failed to initialize VirtualSpace");
1000 
1001     vs.expand_by(commit_size, false);
1002 
1003     if (vs.special()) {
1004       assert_equals(vs.actual_committed_size(), reserve_size_aligned);
1005     } else {
1006       assert_ge(vs.actual_committed_size(), commit_size);
1007       // Approximate the commit granularity.
1008       // Make sure that we don't commit using large pages
1009       // if large pages has been disabled for this VirtualSpace.
1010       size_t commit_granularity = (mode == Disable || !UseLargePages) ?
1011                                    os::vm_page_size() : os::large_page_size();
1012       assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
1013     }
1014 
1015     reserved.release();
1016   }
1017 
1018   static void test_virtual_space_actual_committed_space_one_large_page() {
1019     if (!UseLargePages) {
1020       return;
1021     }
1022 
1023     size_t large_page_size = os::large_page_size();
1024 
1025     ReservedSpace reserved(large_page_size, large_page_size, true, false);
1026 
1027     assert(reserved.is_reserved(), "Must be");
1028 
1029     VirtualSpace vs;
1030     bool initialized = vs.initialize(reserved, 0);
1031     assert(initialized, "Failed to initialize VirtualSpace");
1032 
1033     vs.expand_by(large_page_size, false);
1034 
1035     assert_equals(vs.actual_committed_size(), large_page_size);
1036 
1037     reserved.release();
1038   }
1039 
1040   static void test_virtual_space_actual_committed_space() {
1041     test_virtual_space_actual_committed_space(4 * K, 0);
1042     test_virtual_space_actual_committed_space(4 * K, 4 * K);
1043     test_virtual_space_actual_committed_space(8 * K, 0);
1044     test_virtual_space_actual_committed_space(8 * K, 4 * K);
1045     test_virtual_space_actual_committed_space(8 * K, 8 * K);
1046     test_virtual_space_actual_committed_space(12 * K, 0);
1047     test_virtual_space_actual_committed_space(12 * K, 4 * K);
1048     test_virtual_space_actual_committed_space(12 * K, 8 * K);
1049     test_virtual_space_actual_committed_space(12 * K, 12 * K);
1050     test_virtual_space_actual_committed_space(64 * K, 0);
1051     test_virtual_space_actual_committed_space(64 * K, 32 * K);
1052     test_virtual_space_actual_committed_space(64 * K, 64 * K);
1053     test_virtual_space_actual_committed_space(2 * M, 0);
1054     test_virtual_space_actual_committed_space(2 * M, 4 * K);
1055     test_virtual_space_actual_committed_space(2 * M, 64 * K);
1056     test_virtual_space_actual_committed_space(2 * M, 1 * M);
1057     test_virtual_space_actual_committed_space(2 * M, 2 * M);
1058     test_virtual_space_actual_committed_space(10 * M, 0);
1059     test_virtual_space_actual_committed_space(10 * M, 4 * K);
1060     test_virtual_space_actual_committed_space(10 * M, 8 * K);
1061     test_virtual_space_actual_committed_space(10 * M, 1 * M);
1062     test_virtual_space_actual_committed_space(10 * M, 2 * M);
1063     test_virtual_space_actual_committed_space(10 * M, 5 * M);
1064     test_virtual_space_actual_committed_space(10 * M, 10 * M);
1065   }
1066 
1067   static void test_virtual_space_disable_large_pages() {
1068     if (!UseLargePages) {
1069       return;
1070     }
1071     // These test cases verify that if we force VirtualSpace to disable large pages
1072     test_virtual_space_actual_committed_space(10 * M, 0, Disable);
1073     test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
1074     test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
1075     test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
1076     test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
1077     test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
1078     test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);
1079 
1080     test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
1081     test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
1082     test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
1083     test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
1084     test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
1085     test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
1086     test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);
1087 
1088     test_virtual_space_actual_committed_space(10 * M, 0, Commit);
1089     test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
1090     test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
1091     test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
1092     test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
1093     test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
1094     test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
1095   }
1096 
1097   static void test_virtual_space() {
1098     test_virtual_space_actual_committed_space();
1099     test_virtual_space_actual_committed_space_one_large_page();
1100     test_virtual_space_disable_large_pages();
1101   }
1102 };
1103 
// External entry point: run all VirtualSpace unit tests.
void TestVirtualSpace_test() {
  TestVirtualSpace::test_virtual_space();
}
1107 
1108 #endif // PRODUCT
1109 
1110 #endif