/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

// ReservedSpace

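// A minimal usage sketch (editorial, illustrative only; assumes a 1M
// reservation and that 1M is allocation-granularity aligned on the platform):
// reserve address space up front, commit lazily through a VirtualSpace, and
// release the whole range through the ReservedSpace when done.
//
//   ReservedSpace rs(1 * M);          // reserve 1M of address space
//   VirtualSpace vs;
//   vs.initialize(rs, 64 * K);        // commit the first 64K immediately
//   vs.expand_by(64 * K, false);      // commit another 64K on demand
//   rs.release();                     // unmap the entire reservation
//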
// Dummy constructor
ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
    _alignment(0), _special(false), _executable(false) {
}

ReservedSpace::ReservedSpace(size_t size) {
  size_t page_size = os::page_size_for_region_aligned(size, 1);
  bool large_pages = page_size != (size_t)os::vm_page_size();
  // Don't force the alignment to be large page aligned,
  // since that will waste memory.
  size_t alignment = os::vm_allocation_granularity();
  initialize(size, alignment, large_pages, NULL, 0, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             char* requested_address,
                             const size_t noaccess_prefix) {
  initialize(size + noaccess_prefix, alignment, large, requested_address,
             noaccess_prefix, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             bool executable) {
  initialize(size, alignment, large, NULL, 0, executable);
}

// Helper method.
static bool failed_to_reserve_as_requested(char* base, char* requested_address,
                                           const size_t size, bool special)
{
  if (base == requested_address || requested_address == NULL)
    return false; // did not fail

  if (base != NULL) {
    // A different reserve address may be acceptable in other cases,
    // but for compressed oops the heap should be at the requested address.
    assert(UseCompressedOops, "currently requested address used only for compressed oops");
    if (PrintCompressedOopsMode) {
      tty->cr();
      tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
    }
    // OS ignored requested address. Try different address.
    if (special) {
      if (!os::release_memory_special(base, size)) {
        fatal("os::release_memory_special failed");
      }
    } else {
      if (!os::release_memory(base, size)) {
        fatal("os::release_memory failed");
      }
    }
  }
  return true;
}

void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               const size_t noaccess_prefix,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  alignment = MAX2(alignment, (size_t)os::vm_page_size());

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == alignment, "noaccess prefix wrong");

  _base = NULL;
  _size = 0;
  _special = false;
  _executable = executable;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }

  // If the OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  if (requested_address != 0) {
    requested_address -= noaccess_prefix; // adjust requested address
    assert(requested_address != NULL, "huge noaccess prefix?");
  }

  if (special) {

    base = os::reserve_memory_special(size, alignment, requested_address, executable);

    if (base != NULL) {
      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
        // OS ignored requested address. Try different address.
        return;
      }
      // Check alignment constraints.
      assert((uintptr_t) base % alignment == 0,
             err_msg("Large pages returned a non-aligned address, base: "
                 PTR_FORMAT " alignment: " PTR_FORMAT,
                 base, (void*)(uintptr_t)alignment));
      _special = true;
    } else {
      // failed; try to reserve regular memory below
      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
        if (PrintCompressedOopsMode) {
          tty->cr();
          tty->print_cr("Reserve regular memory without large pages.");
        }
      }
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid mapping over something
    // important.  If no available space is detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // OS ignored requested address. Try different address.
        base = NULL;
      }
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints
    if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
      // Base not aligned, retry
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      // Make sure that size is aligned
      size = align_size_up(size, alignment);
      base = os::reserve_memory_aligned(size, alignment);

      if (requested_address != 0 &&
          failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address. Return to the caller, who can
        // take remedial action (like trying again without a requested address).
        assert(_base == NULL, "should be");
        return;
      }
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = noaccess_prefix;

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == _alignment, "noaccess prefix wrong");

  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");
}


ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                             bool special, bool executable) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = 0;
  _special = special;
  _executable = executable;
}


ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
                                        bool split, bool realloc) {
  assert(partition_size <= size(), "partition failed");
  if (split) {
    os::split_reserved_memory(base(), size(), partition_size, realloc);
  }
  ReservedSpace result(base(), partition_size, alignment, special(),
                       executable());
  return result;
}


ReservedSpace
ReservedSpace::last_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base() + partition_size, size() - partition_size,
                       alignment, special(), executable());
  return result;
}


size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_size_up(size, os::vm_page_size());
}


size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_size_down(size, os::vm_page_size());
}


size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_size_up(size, os::vm_allocation_granularity());
}


size_t ReservedSpace::allocation_align_size_down(size_t size) {
  return align_size_down(size, os::vm_allocation_granularity());
}
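
// A quick worked example of the helpers above (editorial sketch, assuming a
// 4K page size and a 64K allocation granularity):
//
//   page_align_size_up(5000)          == 8192   // next multiple of 4096
//   page_align_size_down(5000)        == 4096   // previous multiple of 4096
//   allocation_align_size_up(5000)    == 65536  // next multiple of 65536
//   allocation_align_size_down(5000)  == 0      // previous multiple of 65536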


void ReservedSpace::release() {
  if (is_reserved()) {
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
    if (special()) {
      os::release_memory_special(real_base, real_size);
    } else {
      os::release_memory(real_base, real_size);
    }
    _base = NULL;
    _size = 0;
    _noaccess_prefix = 0;
    _special = false;
    _executable = false;
  }
}

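// Background note (editorial): with compressed oops, a narrow oop is decoded
// as narrow_oop_base plus the shifted narrow value, so a NULL narrow oop
// decodes to the narrow oop base itself. Keeping a protected "noaccess
// prefix" page at that base makes such a decode fault, which is what allows
// the VM to rely on implicit null checks (see the use of
// Universe::narrow_oop_use_implicit_null_checks() below).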
void ReservedSpace::protect_noaccess_prefix(const size_t size) {
  assert( (_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
                                      (Universe::narrow_oop_base() != NULL) &&
                                      Universe::narrow_oop_use_implicit_null_checks()),
         "noaccess_prefix should be used only with non zero based compressed oops");

  // If there is no noaccess prefix, return.
  if (_noaccess_prefix == 0) return;

  assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
         "must be at least page size big");

  // Protect memory at the base of the allocated region.
  // If special, the page was already committed (this only matters on Windows).
  if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
                          _special)) {
    fatal("cannot protect protection page");
  }
  if (PrintCompressedOopsMode) {
    tty->cr();
    tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
  }

  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
         "must be exactly of required size and alignment");
}

ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
                                     bool large, char* requested_address) :
  ReservedSpace(size, alignment, large,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), alignment) : 0) {
  if (base() != NULL) {
    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
  }

  // Only reserved space for the Java heap should have a noaccess_prefix
  // when using compressed oops.
  protect_noaccess_prefix(size);
}

// Reserve space for the code segment.  This is the same as for the Java heap,
// except that we mark the space as executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     bool large) :
  ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
  MemTracker::record_virtual_memory_type((address)base(), mtCode);
}

// VirtualSpace

VirtualSpace::VirtualSpace() {
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
  return initialize_with_granularity(rs, committed_size, max_commit_granularity);
}

bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
  if (!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  assert(max_commit_granularity > 0, "Granularity must be non-zero.");

  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are the codecache
  // and the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not already aligned that way.
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = max_commit_granularity;
  _upper_alignment  = os::vm_page_size();

  // End of each region
  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // commit to initial size
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}
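
// Worked example of the three commit regions (editorial sketch with made-up
// addresses): suppose rs spans [0x7000, 0x48000) and max_commit_granularity
// is 0x10000 (64K large pages). Then:
//
//   lower_high_boundary  = round_to(0x7000, 0x10000)    = 0x10000
//   middle_high_boundary = round_down(0x48000, 0x10000) = 0x40000
//
// giving a lower region [0x7000, 0x10000) committed with small pages, a
// middle region [0x10000, 0x40000) committed at large-page granularity, and
// an upper region [0x40000, 0x48000) committed with small pages again.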


VirtualSpace::~VirtualSpace() {
  release();
}


void VirtualSpace::release() {
  // This does not release the underlying memory, which this VirtualSpace
  // never reserved itself; the caller must release it via rs.release().
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}


size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}


size_t VirtualSpace::uncommitted_size() const {
  return reserved_size() - committed_size();
}
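
// Editorial note: by construction, committed_size() + uncommitted_size()
// == reserved_size(), with [low(), high()) always the committed prefix of
// [low_boundary(), high_boundary()).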

size_t VirtualSpace::actual_committed_size() const {
  // Special VirtualSpaces commit all reserved space up front.
  if (special()) {
    return reserved_size();
  }

  size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
  size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
  size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));

#ifdef ASSERT
  size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
  size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
  size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));

  if (committed_high > 0) {
    assert(committed_low == lower, "Must be");
    assert(committed_middle == middle, "Must be");
  }

  if (committed_middle > 0) {
    assert(committed_low == lower, "Must be");
  }
  if (committed_middle < middle) {
    assert(committed_high == 0, "Must be");
  }

  if (committed_low < lower) {
    assert(committed_high == 0, "Must be");
    assert(committed_middle == 0, "Must be");
  }
#endif

  return committed_low + committed_middle + committed_high;
}


bool VirtualSpace::contains(const void* p) const {
  return low() <= (const char*) p && (const char*) p < high();
}

/*
   First we need to determine whether a particular virtual space is using
   large pages.  This is done in the initialize function, and only virtual
   spaces that are larger than LargePageSizeInBytes use large pages.  Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks.  If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading up to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
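// To make the chunking concrete (editorial sketch, reusing the made-up layout
// from the initialize_with_granularity example): with 4K small pages, a 64K
// middle alignment, and high() currently at 0x10000, expand_by(0x1000) lands
// in the middle region and is rounded up so that 0x10000..0x20000 is
// committed in one 64K chunk; high() itself still advances by only the
// requested 0x1000.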
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) return false;

  if (special()) {
    // don't commit memory if the entire space is pinned in memory
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(),
         "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =
    MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high =
    MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =
    MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on each region's alignment.  The lower and
  // upper alignments will always be the default page size.  The middle
  // alignment will be LargePageSizeInBytes if the actual size of the virtual
  // space is in fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By getting the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra or inter region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs =
      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs =
      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs =
      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions
  if (lower_needs > 0) {
    assert(low_boundary() <= lower_high() &&
           lower_high() + lower_needs <= lower_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", lower_needs=" SIZE_FORMAT ", %d) failed",
                         lower_high(), lower_needs, _executable);)
      return false;
    } else {
      _lower_high += lower_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= middle_high() &&
           middle_high() + middle_needs <= middle_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
                           _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", middle_needs=" SIZE_FORMAT ", " SIZE_FORMAT
                         ", %d) failed", middle_high(), middle_needs,
                         middle_alignment(), _executable);)
      return false;
    }
    _middle_high += middle_needs;
  }
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= upper_high() &&
           upper_high() + upper_needs <= upper_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", upper_needs=" SIZE_FORMAT ", %d) failed",
                         upper_high(), upper_needs, _executable);)
      return false;
    } else {
      _upper_high += upper_needs;
    }
  }

  if (pre_touch || AlwaysPreTouch) {
    int vm_ps = os::vm_page_size();
    for (char* curr = previous_high;
         curr < unaligned_new_high;
         curr += vm_ps) {
      // Note the use of a write here; originally we tried just a read, but
      // since the value read was unused, the optimizer removed the read.
      // If we ever have a concurrent touchahead thread, we'll want to use
      // a read, to avoid the potential of overwriting data (if a mutator
      // thread beats the touchahead thread to a page).  There are various
      // ways of making sure this read is not optimized away: for example,
      // generating the code for a read procedure at runtime.
      *curr = 0;
    }
  }

  _high += bytes;
  return true;
}

// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary, at
// which point that page can be uncommitted.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // don't uncommit if the entire space is pinned in memory
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align address to region's alignment
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}

#ifndef PRODUCT
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}

void VirtualSpace::print_on(outputStream* out) {
  out->print   ("Virtual space:");
  if (special()) out->print(" (pinned in memory)");
  out->cr();
  out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
  out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
  out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
}

void VirtualSpace::print() {
  print_on(tty);
}

/////////////// Unit tests ///////////////

#ifndef PRODUCT

#define test_log(...) \
  do {\
    if (VerboseInternalVMTests) { \
      tty->print_cr(__VA_ARGS__); \
      tty->flush(); \
    }\
  } while (false)

class TestReservedSpace : AllStatic {
 public:
  static void small_page_write(void* addr, size_t size) {
    size_t page_size = os::vm_page_size();

    char* end = (char*)addr + size;
    for (char* p = (char*)addr; p < end; p += page_size) {
      *p = 1;
    }
  }

  static void release_memory_for_test(ReservedSpace rs) {
    if (rs.special()) {
      guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
    } else {
      guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
    }
  }

  static void test_reserved_space1(size_t size, size_t alignment) {
    test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);

    assert(is_size_aligned(size, alignment), "Incorrect input parameters");

    ReservedSpace rs(size,          // size
                     alignment,     // alignment
                     UseLargePages, // large
                     NULL,          // requested_address
                     0);            // noaccess_prefix

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
    assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }

  static void test_reserved_space2(size_t size) {
    test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);

    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");

    ReservedSpace rs(size);

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }

  static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
    test_log("test_reserved_space3(%p, %p, %d)",
        (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);

    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
    assert(is_size_aligned(size, alignment), "Size must be aligned to the requested alignment");

    bool large = maybe_large && UseLargePages && size >= os::large_page_size();

    ReservedSpace rs(size, alignment, large, false);

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }


  static void test_reserved_space1() {
    size_t size = 2 * 1024 * 1024;
    size_t ag   = os::vm_allocation_granularity();

    test_reserved_space1(size,      ag);
    test_reserved_space1(size * 2,  ag);
    test_reserved_space1(size * 10, ag);
  }

  static void test_reserved_space2() {
    size_t size = 2 * 1024 * 1024;
    size_t ag = os::vm_allocation_granularity();

    test_reserved_space2(size * 1);
    test_reserved_space2(size * 2);
    test_reserved_space2(size * 10);
    test_reserved_space2(ag);
    test_reserved_space2(size - ag);
    test_reserved_space2(size);
    test_reserved_space2(size + ag);
    test_reserved_space2(size * 2);
    test_reserved_space2(size * 2 - ag);
    test_reserved_space2(size * 2 + ag);
    test_reserved_space2(size * 3);
    test_reserved_space2(size * 3 - ag);
    test_reserved_space2(size * 3 + ag);
    test_reserved_space2(size * 10);
    test_reserved_space2(size * 10 + size / 2);
  }

  static void test_reserved_space3() {
    size_t ag = os::vm_allocation_granularity();

    test_reserved_space3(ag,      ag    , false);
    test_reserved_space3(ag * 2,  ag    , false);
    test_reserved_space3(ag * 3,  ag    , false);
    test_reserved_space3(ag * 2,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 2, false);
    test_reserved_space3(ag * 8,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 4, false);
    test_reserved_space3(ag * 8,  ag * 4, false);
    test_reserved_space3(ag * 16, ag * 4, false);

    if (UseLargePages) {
      size_t lp = os::large_page_size();

      // Without large pages
      test_reserved_space3(lp,     ag * 4, false);
      test_reserved_space3(lp * 2, ag * 4, false);
      test_reserved_space3(lp * 4, ag * 4, false);
      test_reserved_space3(lp,     lp    , false);
      test_reserved_space3(lp * 2, lp    , false);
      test_reserved_space3(lp * 3, lp    , false);
      test_reserved_space3(lp * 2, lp * 2, false);
      test_reserved_space3(lp * 4, lp * 2, false);
      test_reserved_space3(lp * 8, lp * 2, false);

      // With large pages
      test_reserved_space3(lp, ag * 4    , true);
      test_reserved_space3(lp * 2, ag * 4, true);
      test_reserved_space3(lp * 4, ag * 4, true);
      test_reserved_space3(lp, lp        , true);
      test_reserved_space3(lp * 2, lp    , true);
      test_reserved_space3(lp * 3, lp    , true);
      test_reserved_space3(lp * 2, lp * 2, true);
      test_reserved_space3(lp * 4, lp * 2, true);
      test_reserved_space3(lp * 8, lp * 2, true);
    }
  }

  static void test_reserved_space() {
    test_reserved_space1();
    test_reserved_space2();
    test_reserved_space3();
  }
};

void TestReservedSpace_test() {
  TestReservedSpace::test_reserved_space();
}

#define assert_equals(actual, expected)     \
  assert(actual == expected,                \
    err_msg("Got " SIZE_FORMAT " expected " \
      SIZE_FORMAT, actual, expected));

#define assert_ge(value1, value2)                  \
  assert(value1 >= value2,                         \
    err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
      #value2 "': " SIZE_FORMAT, value1, value2));

#define assert_lt(value1, value2)                  \
  assert(value1 < value2,                          \
    err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
      #value2 "': " SIZE_FORMAT, value1, value2));


class TestVirtualSpace : AllStatic {
  enum TestLargePages {
    Default,
    Disable,
    Reserve,
    Commit
  };

  static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
    switch (mode) {
    default:
    case Default:
    case Reserve:
      return ReservedSpace(reserve_size_aligned);
    case Disable:
    case Commit:
      return ReservedSpace(reserve_size_aligned,
                           os::vm_allocation_granularity(),
                           /* large */ false, /* exec */ false);
    }
  }

  static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
    switch (mode) {
    default:
    case Default:
    case Reserve:
      return vs.initialize(rs, 0);
    case Disable:
      return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
    case Commit:
      return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
    }
  }

 public:
  static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
                                                        TestLargePages mode = Default) {
    size_t granularity = os::vm_allocation_granularity();
    size_t reserve_size_aligned = align_size_up(reserve_size, granularity);

    ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);

    assert(reserved.is_reserved(), "Must be");

    VirtualSpace vs;
    bool initialized = initialize_virtual_space(vs, reserved, mode);
    assert(initialized, "Failed to initialize VirtualSpace");

    vs.expand_by(commit_size, false);

    if (vs.special()) {
      assert_equals(vs.actual_committed_size(), reserve_size_aligned);
    } else {
      assert_ge(vs.actual_committed_size(), commit_size);
      // Approximate the commit granularity.
      // Make sure that we don't commit using large pages
      // if large pages have been disabled for this VirtualSpace.
      size_t commit_granularity = (mode == Disable || !UseLargePages) ?
                                   os::vm_page_size() : os::large_page_size();
      assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
    }

    reserved.release();
  }

  static void test_virtual_space_actual_committed_space_one_large_page() {
    if (!UseLargePages) {
      return;
    }

    size_t large_page_size = os::large_page_size();

    ReservedSpace reserved(large_page_size, large_page_size, true, false);

    assert(reserved.is_reserved(), "Must be");

    VirtualSpace vs;
    bool initialized = vs.initialize(reserved, 0);
    assert(initialized, "Failed to initialize VirtualSpace");

    vs.expand_by(large_page_size, false);

    assert_equals(vs.actual_committed_size(), large_page_size);

    reserved.release();
  }

  static void test_virtual_space_actual_committed_space() {
    test_virtual_space_actual_committed_space(4 * K, 0);
    test_virtual_space_actual_committed_space(4 * K, 4 * K);
    test_virtual_space_actual_committed_space(8 * K, 0);
    test_virtual_space_actual_committed_space(8 * K, 4 * K);
    test_virtual_space_actual_committed_space(8 * K, 8 * K);
    test_virtual_space_actual_committed_space(12 * K, 0);
    test_virtual_space_actual_committed_space(12 * K, 4 * K);
    test_virtual_space_actual_committed_space(12 * K, 8 * K);
    test_virtual_space_actual_committed_space(12 * K, 12 * K);
    test_virtual_space_actual_committed_space(64 * K, 0);
    test_virtual_space_actual_committed_space(64 * K, 32 * K);
    test_virtual_space_actual_committed_space(64 * K, 64 * K);
    test_virtual_space_actual_committed_space(2 * M, 0);
    test_virtual_space_actual_committed_space(2 * M, 4 * K);
    test_virtual_space_actual_committed_space(2 * M, 64 * K);
    test_virtual_space_actual_committed_space(2 * M, 1 * M);
    test_virtual_space_actual_committed_space(2 * M, 2 * M);
    test_virtual_space_actual_committed_space(10 * M, 0);
    test_virtual_space_actual_committed_space(10 * M, 4 * K);
    test_virtual_space_actual_committed_space(10 * M, 8 * K);
    test_virtual_space_actual_committed_space(10 * M, 1 * M);
    test_virtual_space_actual_committed_space(10 * M, 2 * M);
    test_virtual_space_actual_committed_space(10 * M, 5 * M);
    test_virtual_space_actual_committed_space(10 * M, 10 * M);
  }

  static void test_virtual_space_disable_large_pages() {
    if (!UseLargePages) {
      return;
    }
    // These test cases verify the commit accounting when we force VirtualSpace
    // to disable large pages (and, for comparison, the Reserve and Commit modes).
    test_virtual_space_actual_committed_space(10 * M, 0, Disable);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);

    test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);

    test_virtual_space_actual_committed_space(10 * M, 0, Commit);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
  }

  static void test_virtual_space() {
    test_virtual_space_actual_committed_space();
    test_virtual_space_actual_committed_space_one_large_page();
    test_virtual_space_disable_large_pages();
  }
};

void TestVirtualSpace_test() {
  TestVirtualSpace::test_virtual_space();
}

#endif // PRODUCT

#endif // PRODUCT