/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif


// ReservedSpace

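// A ReservedSpace covers a range of reserved (but not necessarily
// committed) address space; a VirtualSpace (below) commits and uncommits
// memory within such a range.  A minimal usage sketch, assuming a 2M
// range committed one page at a time (illustrative only, not an actual
// VM startup path):
//
//   ReservedSpace rs(2 * M);                 // reserve address space only
//   VirtualSpace vs;
//   vs.initialize(rs, os::vm_page_size());   // commit the first page
//   ...
//   vs.release();                            // resets bookkeeping only
//   rs.release();                            // actually unmaps the range
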
// Dummy constructor
ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
    _alignment(0), _special(false), _executable(false) {
}

ReservedSpace::ReservedSpace(size_t size) {
  size_t page_size = os::page_size_for_region(size, size, 1);
  bool large_pages = page_size != (size_t)os::vm_page_size();
  // Don't force the alignment to be large page aligned,
  // since that will waste memory.
  size_t alignment = os::vm_allocation_granularity();
  initialize(size, alignment, large_pages, NULL, 0, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             char* requested_address,
                             const size_t noaccess_prefix) {
  initialize(size+noaccess_prefix, alignment, large, requested_address,
             noaccess_prefix, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             bool executable) {
  initialize(size, alignment, large, NULL, 0, executable);
}

// Helper method.
static bool failed_to_reserve_as_requested(char* base, char* requested_address,
                                           const size_t size, bool special)
{
  if (base == requested_address || requested_address == NULL)
    return false; // did not fail

  if (base != NULL) {
    // A different reserve address may be acceptable in other cases,
    // but for compressed oops the heap should be at the requested address.
    assert(UseCompressedOops, "currently requested address used only for compressed oops");
    if (PrintCompressedOopsMode) {
      tty->cr();
      tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
    }
    // OS ignored requested address. Try different address.
    if (special) {
      if (!os::release_memory_special(base, size)) {
        fatal("os::release_memory_special failed");
      }
    } else {
      if (!os::release_memory(base, size)) {
        fatal("os::release_memory failed");
      }
    }
  }
  return true;
}

void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               const size_t noaccess_prefix,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  alignment = MAX2(alignment, (size_t)os::vm_page_size());

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == alignment, "noaccess prefix wrong");

  _base = NULL;
  _size = 0;
  _special = false;
  _executable = executable;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }

  // If the OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  if (requested_address != 0) {
    requested_address -= noaccess_prefix; // adjust requested address
    assert(requested_address != NULL, "huge noaccess prefix?");
  }

  if (special) {

    base = os::reserve_memory_special(size, alignment, requested_address, executable);

    if (base != NULL) {
      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
        // OS ignored requested address. Try different address.
        return;
      }
      // Check alignment constraints.
      assert((uintptr_t) base % alignment == 0,
             err_msg("Large pages returned a non-aligned address, base: "
                 PTR_FORMAT " alignment: " PTR_FORMAT,
                 base, (void*)(uintptr_t)alignment));
      _special = true;
    } else {
      // failed; try to reserve regular memory below
      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
        if (PrintCompressedOopsMode) {
          tty->cr();
          tty->print_cr("Reserve regular memory without large pages.");
        }
      }
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid mapping over something
    // important.  If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // OS ignored requested address. Try different address.
        base = NULL;
      }
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints
    if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
      // Base not aligned, retry
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      // Make sure that size is aligned
      size = align_size_up(size, alignment);
      base = os::reserve_memory_aligned(size, alignment);

      if (requested_address != 0 &&
          failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address. Return to the caller, who can
        // take remedial action (like trying again without a requested address).
        assert(_base == NULL, "should be");
        return;
      }
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = noaccess_prefix;

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == _alignment, "noaccess prefix wrong");

  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");
}


ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                             bool special, bool executable) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = 0;
  _special = special;
  _executable = executable;
}


ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
                                        bool split, bool realloc) {
  assert(partition_size <= size(), "partition failed");
  if (split) {
    os::split_reserved_memory(base(), size(), partition_size, realloc);
  }
  ReservedSpace result(base(), partition_size, alignment, special(),
                       executable());
  return result;
}


ReservedSpace
ReservedSpace::last_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base() + partition_size, size() - partition_size,
                       alignment, special(), executable());
  return result;
}


size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_size_up(size, os::vm_page_size());
}


size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_size_down(size, os::vm_page_size());
}


size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_size_up(size, os::vm_allocation_granularity());
}


size_t ReservedSpace::allocation_align_size_down(size_t size) {
  return align_size_down(size, os::vm_allocation_granularity());
}


void ReservedSpace::release() {
  if (is_reserved()) {
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
    if (special()) {
      os::release_memory_special(real_base, real_size);
    } else {
      os::release_memory(real_base, real_size);
    }
    _base = NULL;
    _size = 0;
    _noaccess_prefix = 0;
    _special = false;
    _executable = false;
  }
}

void ReservedSpace::protect_noaccess_prefix(const size_t size) {
  assert( (_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
                                      (Universe::narrow_oop_base() != NULL) &&
                                      Universe::narrow_oop_use_implicit_null_checks()),
         "noaccess_prefix should be used only with non zero based compressed oops");

  // If there is no noaccess prefix, return.
  if (_noaccess_prefix == 0) return;

  assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
         "must be at least page size big");

  // Protect memory at the base of the allocated region.
  // If special, the page was committed (only matters on Windows).
  if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
                          _special)) {
    fatal("cannot protect protection page");
  }
  if (PrintCompressedOopsMode) {
    tty->cr();
    tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
  }

  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
         "must be exactly of required size and alignment");
}
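
// Address layout when a noaccess prefix is in use (an illustrative
// sketch of the logic above, not normative):
//
//   _base (as reserved)          _base (after protect_noaccess_prefix)
//   |                            |
//   +---- noaccess_prefix -------+-------------- _size ---------------+
//   | protected (MEM_PROT_NONE)  |         usable heap memory         |
//   +--------------------------------------------------------------- +
//
// The protected prefix makes accesses through narrow oops that decode
// to addresses just above the heap base fault, which is what lets
// implicit null checks work with non zero based compressed oops.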

ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
                                     bool large, char* requested_address) :
  ReservedSpace(size, alignment, large,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), alignment) : 0) {
  if (base() > 0) {
    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
  }

  // Only reserved space for the java heap should have a noaccess_prefix
  // if using compressed oops.
  protect_noaccess_prefix(size);
}

// Reserve space for code segment.  Same as Java heap only we mark this as
// executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     bool large) :
  ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
  MemTracker::record_virtual_memory_type((address)base(), mtCode);
}

// VirtualSpace

VirtualSpace::VirtualSpace() {
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  const size_t max_commit_granularity = os::page_size_for_region(rs.size(), rs.size(), 1);
  return initialize_with_granularity(rs, committed_size, max_commit_granularity);
}
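
// A note on the commit granularity: os::page_size_for_region(rs.size(),
// rs.size(), 1) is assumed here to return the largest supported page size
// at which the region still holds at least one page, so a large enough
// VirtualSpace expands and shrinks in large-page-sized chunks when the
// OS supports it.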

bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
  if (!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  assert(max_commit_granularity > 0, "Granularity must be non-zero.");

  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = max_commit_granularity;
  _upper_alignment  = os::vm_page_size();

  // End of each region
  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // commit to initial size
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}


VirtualSpace::~VirtualSpace() {
  release();
}


void VirtualSpace::release() {
  // This does not release the underlying memory, which VirtualSpace never
  // reserved; the caller must release it via rs.release().
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}


size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}


size_t VirtualSpace::uncommitted_size() const {
  return reserved_size() - committed_size();
}

size_t VirtualSpace::actual_committed_size() const {
  // Special VirtualSpaces commit all reserved space up front.
  if (special()) {
    return reserved_size();
  }

  size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
  size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
  size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));

#ifdef ASSERT
  size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
  size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
  size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));

  if (committed_high > 0) {
    assert(committed_low == lower, "Must be");
    assert(committed_middle == middle, "Must be");
  }

  if (committed_middle > 0) {
    assert(committed_low == lower, "Must be");
  }
  if (committed_middle < middle) {
    assert(committed_high == 0, "Must be");
  }

  if (committed_low < lower) {
    assert(committed_high == 0, "Must be");
    assert(committed_middle == 0, "Must be");
  }
#endif

  return committed_low + committed_middle + committed_high;
}


bool VirtualSpace::contains(const void* p) const {
  return low() <= (const char*) p && (const char*) p < high();
}

/*
   First we need to determine if a particular virtual space is using large
   pages.  This is done at initialization time and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages.  Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks.  If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading up to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
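//
// The resulting three-region layout (an illustrative sketch):
//
//   low_boundary()   lower_high_boundary()    middle_high_boundary()   high_boundary()
//   |     lower      |        middle          |        upper           |
//   |- small pages --|- middle_alignment() chunks (e.g. large pages) --|-- small pages -|
//
// lower covers the unaligned start of the space, middle the large-page
// aligned interior, and upper the unaligned tail; if low_boundary() and
// high_boundary() are already aligned, lower and upper are empty.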
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) return false;

  if (special()) {
    // don't commit memory if the entire space is pinned in memory
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(),
         "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =
    MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high =
    MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =
    MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on each region's alignment.  lower and upper
  // alignment will always be the default page size.  middle alignment will be
  // LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By taking the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra or inter region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs =
      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs =
      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs =
      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions
  if (lower_needs > 0) {
    assert(low_boundary() <= lower_high() &&
           lower_high() + lower_needs <= lower_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", lower_needs=" SIZE_FORMAT ", %d) failed",
                         lower_high(), lower_needs, _executable);)
      return false;
    } else {
      _lower_high += lower_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= middle_high() &&
           middle_high() + middle_needs <= middle_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
                           _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", middle_needs=" SIZE_FORMAT ", " SIZE_FORMAT
                         ", %d) failed", middle_high(), middle_needs,
                         middle_alignment(), _executable);)
      return false;
    }
    _middle_high += middle_needs;
  }
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= upper_high() &&
           upper_high() + upper_needs <= upper_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", upper_needs=" SIZE_FORMAT ", %d) failed",
                         upper_high(), upper_needs, _executable);)
      return false;
    } else {
      _upper_high += upper_needs;
    }
  }

  if (pre_touch || AlwaysPreTouch) {
    int vm_ps = os::vm_page_size();
    for (char* curr = previous_high;
         curr < unaligned_new_high;
         curr += vm_ps) {
      // Note the use of a write here; originally we tried just a read, but
      // since the value read was unused, the optimizer removed the read.
      // If we ever have a concurrent touchahead thread, we'll want to use
      // a read, to avoid the potential of overwriting data (if a mutator
      // thread beats the touchahead thread to a page).  There are various
      // ways of making sure this read is not optimized away: for example,
      // generating the code for a read procedure at runtime.
      *curr = 0;
    }
  }

  _high += bytes;
  return true;
}

// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary
// in which case that particular page can now be uncommitted.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // don't uncommit if the entire space is pinned in memory
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align address to region's alignment
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}
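
// Example of the commit/uncommit bookkeeping above (a sketch with
// illustrative sizes): on a space with 4K lower/upper alignment and a
// 2M middle alignment,
//
//   vs.expand_by(3 * M, false);   // commits lower pages, then whole
//                                 // 2M chunks in the middle region
//   vs.shrink_by(1 * M);          // uncommits a 2M middle chunk only once
//                                 // high() retreats past its boundary
//
// high() always moves by exactly the requested number of bytes; only the
// per-region *_high pointers move in alignment-sized steps.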

#ifndef PRODUCT
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}

void VirtualSpace::print_on(outputStream* out) {
  out->print   ("Virtual space:");
  if (special()) out->print(" (pinned in memory)");
  out->cr();
  out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
  out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
  out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
}

void VirtualSpace::print() {
  print_on(tty);
}

/////////////// Unit tests ///////////////

#ifndef PRODUCT

#define test_log(...) \
  do {\
    if (VerboseInternalVMTests) { \
      tty->print_cr(__VA_ARGS__); \
      tty->flush(); \
    }\
  } while (false)

class TestReservedSpace : AllStatic {
 public:
  static void small_page_write(void* addr, size_t size) {
    size_t page_size = os::vm_page_size();

    char* end = (char*)addr + size;
    for (char* p = (char*)addr; p < end; p += page_size) {
      *p = 1;
    }
  }

  static void release_memory_for_test(ReservedSpace rs) {
    if (rs.special()) {
      guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
    } else {
      guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
    }
  }

  static void test_reserved_space1(size_t size, size_t alignment) {
    test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);

    assert(is_size_aligned(size, alignment), "Incorrect input parameters");

    ReservedSpace rs(size,          // size
                     alignment,     // alignment
                     UseLargePages, // large
                     NULL,          // requested_address
                     0);            // noaccess_prefix

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
    assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }

  static void test_reserved_space2(size_t size) {
    test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);

    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");

    ReservedSpace rs(size);

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }

  static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
    test_log("test_reserved_space3(%p, %p, %d)",
        (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);

    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
    assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");

    bool large = maybe_large && UseLargePages && size >= os::large_page_size();

    ReservedSpace rs(size, alignment, large, false);

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }


  static void test_reserved_space1() {
    size_t size = 2 * 1024 * 1024;
    size_t ag   = os::vm_allocation_granularity();

    test_reserved_space1(size,      ag);
    test_reserved_space1(size * 2,  ag);
    test_reserved_space1(size * 10, ag);
  }

  static void test_reserved_space2() {
    size_t size = 2 * 1024 * 1024;
    size_t ag = os::vm_allocation_granularity();

    test_reserved_space2(size * 1);
    test_reserved_space2(size * 2);
    test_reserved_space2(size * 10);
    test_reserved_space2(ag);
    test_reserved_space2(size - ag);
    test_reserved_space2(size);
    test_reserved_space2(size + ag);
    test_reserved_space2(size * 2);
    test_reserved_space2(size * 2 - ag);
    test_reserved_space2(size * 2 + ag);
    test_reserved_space2(size * 3);
    test_reserved_space2(size * 3 - ag);
    test_reserved_space2(size * 3 + ag);
    test_reserved_space2(size * 10);
    test_reserved_space2(size * 10 + size / 2);
  }

  static void test_reserved_space3() {
    size_t ag = os::vm_allocation_granularity();

    test_reserved_space3(ag,      ag    , false);
    test_reserved_space3(ag * 2,  ag    , false);
    test_reserved_space3(ag * 3,  ag    , false);
    test_reserved_space3(ag * 2,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 2, false);
    test_reserved_space3(ag * 8,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 4, false);
    test_reserved_space3(ag * 8,  ag * 4, false);
    test_reserved_space3(ag * 16, ag * 4, false);

    if (UseLargePages) {
      size_t lp = os::large_page_size();

      // Without large pages
      test_reserved_space3(lp,     ag * 4, false);
      test_reserved_space3(lp * 2, ag * 4, false);
      test_reserved_space3(lp * 4, ag * 4, false);
      test_reserved_space3(lp,     lp    , false);
      test_reserved_space3(lp * 2, lp    , false);
      test_reserved_space3(lp * 3, lp    , false);
      test_reserved_space3(lp * 2, lp * 2, false);
      test_reserved_space3(lp * 4, lp * 2, false);
      test_reserved_space3(lp * 8, lp * 2, false);

      // With large pages
      test_reserved_space3(lp, ag * 4    , true);
      test_reserved_space3(lp * 2, ag * 4, true);
      test_reserved_space3(lp * 4, ag * 4, true);
      test_reserved_space3(lp, lp        , true);
      test_reserved_space3(lp * 2, lp    , true);
      test_reserved_space3(lp * 3, lp    , true);
      test_reserved_space3(lp * 2, lp * 2, true);
      test_reserved_space3(lp * 4, lp * 2, true);
      test_reserved_space3(lp * 8, lp * 2, true);
    }
  }

  static void test_reserved_space() {
    test_reserved_space1();
    test_reserved_space2();
    test_reserved_space3();
  }
};

void TestReservedSpace_test() {
  TestReservedSpace::test_reserved_space();
}

#define assert_equals(actual, expected)     \
  assert(actual == expected,                \
    err_msg("Got " SIZE_FORMAT " expected " \
      SIZE_FORMAT, actual, expected));

#define assert_ge(value1, value2)                  \
  assert(value1 >= value2,                         \
    err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
      #value2 "': " SIZE_FORMAT, value1, value2));

#define assert_lt(value1, value2)                  \
  assert(value1 < value2,                          \
    err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
      #value2 "': " SIZE_FORMAT, value1, value2));


class TestVirtualSpace : AllStatic {
  enum TestLargePages {
    Default,
    Disable,
    Reserve,
    Commit
  };

  static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
    switch(mode) {
    default:
    case Default:
    case Reserve:
      return ReservedSpace(reserve_size_aligned);
    case Disable:
    case Commit:
      return ReservedSpace(reserve_size_aligned,
                           os::vm_allocation_granularity(),
                           /* large */ false, /* exec */ false);
    }
  }

  static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
    switch(mode) {
    default:
    case Default:
    case Reserve:
      return vs.initialize(rs, 0);
    case Disable:
      return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
    case Commit:
      return vs.initialize_with_granularity(rs, 0, os::page_size_for_region(rs.size(), rs.size(), 1));
    }
  }

 public:
  static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
                                                        TestLargePages mode = Default) {
    size_t granularity = os::vm_allocation_granularity();
    size_t reserve_size_aligned = align_size_up(reserve_size, granularity);

    ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);

    assert(reserved.is_reserved(), "Must be");

    VirtualSpace vs;
    bool initialized = initialize_virtual_space(vs, reserved, mode);
    assert(initialized, "Failed to initialize VirtualSpace");

    vs.expand_by(commit_size, false);

    if (vs.special()) {
      assert_equals(vs.actual_committed_size(), reserve_size_aligned);
    } else {
      assert_ge(vs.actual_committed_size(), commit_size);
      // Approximate the commit granularity.
      // Make sure that we don't commit using large pages
      // if large pages have been disabled for this VirtualSpace.
      size_t commit_granularity = (mode == Disable || !UseLargePages) ?
                                   os::vm_page_size() : os::large_page_size();
      assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
    }

    reserved.release();
  }

  static void test_virtual_space_actual_committed_space_one_large_page() {
    if (!UseLargePages) {
      return;
    }

    size_t large_page_size = os::large_page_size();

    ReservedSpace reserved(large_page_size, large_page_size, true, false);

    assert(reserved.is_reserved(), "Must be");

    VirtualSpace vs;
    bool initialized = vs.initialize(reserved, 0);
    assert(initialized, "Failed to initialize VirtualSpace");

    vs.expand_by(large_page_size, false);

    assert_equals(vs.actual_committed_size(), large_page_size);

    reserved.release();
  }

  static void test_virtual_space_actual_committed_space() {
    test_virtual_space_actual_committed_space(4 * K, 0);
    test_virtual_space_actual_committed_space(4 * K, 4 * K);
    test_virtual_space_actual_committed_space(8 * K, 0);
    test_virtual_space_actual_committed_space(8 * K, 4 * K);
    test_virtual_space_actual_committed_space(8 * K, 8 * K);
    test_virtual_space_actual_committed_space(12 * K, 0);
    test_virtual_space_actual_committed_space(12 * K, 4 * K);
    test_virtual_space_actual_committed_space(12 * K, 8 * K);
    test_virtual_space_actual_committed_space(12 * K, 12 * K);
    test_virtual_space_actual_committed_space(64 * K, 0);
    test_virtual_space_actual_committed_space(64 * K, 32 * K);
    test_virtual_space_actual_committed_space(64 * K, 64 * K);
    test_virtual_space_actual_committed_space(2 * M, 0);
    test_virtual_space_actual_committed_space(2 * M, 4 * K);
    test_virtual_space_actual_committed_space(2 * M, 64 * K);
    test_virtual_space_actual_committed_space(2 * M, 1 * M);
    test_virtual_space_actual_committed_space(2 * M, 2 * M);
    test_virtual_space_actual_committed_space(10 * M, 0);
    test_virtual_space_actual_committed_space(10 * M, 4 * K);
    test_virtual_space_actual_committed_space(10 * M, 8 * K);
    test_virtual_space_actual_committed_space(10 * M, 1 * M);
    test_virtual_space_actual_committed_space(10 * M, 2 * M);
    test_virtual_space_actual_committed_space(10 * M, 5 * M);
    test_virtual_space_actual_committed_space(10 * M, 10 * M);
  }

  static void test_virtual_space_disable_large_pages() {
    if (!UseLargePages) {
      return;
    }
    // These test cases verify that committing is done with small pages
    // when we force VirtualSpace to disable large pages.
    test_virtual_space_actual_committed_space(10 * M, 0, Disable);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);

    test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);

    test_virtual_space_actual_committed_space(10 * M, 0, Commit);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
  }

  static void test_virtual_space() {
    test_virtual_space_actual_committed_space();
    test_virtual_space_actual_committed_space_one_large_page();
    test_virtual_space_disable_large_pages();
  }
};

void TestVirtualSpace_test() {
  TestVirtualSpace::test_virtual_space();
}

#endif // PRODUCT

#endif