/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

// ReservedSpace

// Dummy constructor
ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
    _alignment(0), _special(false), _executable(false) {
}

ReservedSpace::ReservedSpace(size_t size) {
  size_t page_size = os::page_size_for_region(size, 1);
  bool large_pages = page_size != (size_t)os::vm_page_size();
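  // Illustrative note (behavior is platform-dependent): page_size_for_region()
  // picks the largest supported page size that covers the region with at
  // least the given minimum page count (1 here).  E.g., with 2M large pages
  // enabled, a 4M request may come back with page_size == 2M, making
  // large_pages true; a 1M request falls back to the small page size.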
  // Don't force the alignment to be large page aligned,
  // since that will waste memory.
  size_t alignment = os::vm_allocation_granularity();
  initialize(size, alignment, large_pages, NULL, 0, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             char* requested_address,
                             const size_t noaccess_prefix) {
  initialize(size + noaccess_prefix, alignment, large, requested_address,
             noaccess_prefix, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             bool executable) {
  initialize(size, alignment, large, NULL, 0, executable);
}

// Helper method.
static bool failed_to_reserve_as_requested(char* base, char* requested_address,
                                           const size_t size, bool special)
{
  if (base == requested_address || requested_address == NULL)
    return false; // did not fail

  if (base != NULL) {
    // A different reserve address may be acceptable in other cases, but the
    // compressed oops heap should be at the requested address, or at least
    // in the requested encoding mode.
    if ((uint64_t)base >= HeapBaseMinAddress) {
      if ((uint64_t)requested_address + size < UnscaledOopHeapMax) {
        // Requested unscaled mode.
        if ((uint64_t)base            + size < UnscaledOopHeapMax) {
          // Reserved unscaled mode.
          if (PrintCompressedOopsMode) {
            tty->print("base: %p, req_addr: %p, base+size: %p but fulfills unscaled criteria.\n", base, requested_address, base+size);
          }
          return false;
        }
      } else if ((uint64_t)requested_address + size < OopEncodingHeapMax &&
                 (uint64_t)base              + size < OopEncodingHeapMax) {
        // Requested and reserved zerobased mode.
        if (PrintCompressedOopsMode) {
          tty->print("base: %p, req_addr: %p, base+size: %p but fulfills zerobased criteria.\n", base, requested_address, base+size);
        }
        return false;
      }
    }

    assert(UseCompressedOops, "currently requested address used only for compressed oops");
    if (PrintCompressedOopsMode) {
      tty->cr();
      tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
    }
    // OS ignored requested address. Try different address.
    if (special) {
      if (!os::release_memory_special(base, size)) {
        fatal("os::release_memory_special failed");
      }
    } else {
      if (!os::release_memory(base, size)) {
        fatal("os::release_memory failed");
      }
    }
  }
  return true;
}
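
// Illustration of the thresholds above, assuming the default 8-byte object
// alignment: UnscaledOopHeapMax is 4 GB, so a heap that ends below 4 GB can
// use unscaled compressed oops, and OopEncodingHeapMax is 32 GB, below which
// zero-based encoding still applies.  A reservation that misses the requested
// address but stays under the same threshold is therefore not a failure.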

void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               const size_t noaccess_prefix,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  alignment = MAX2(alignment, (size_t)os::vm_page_size());

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == alignment, "noaccess prefix wrong");

  _base = NULL;
  _size = 0;
  _special = false;
  _executable = executable;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }

  // If the OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  if (requested_address != 0) {
    requested_address -= noaccess_prefix; // adjust requested address
    assert(requested_address != NULL, "huge noaccess prefix?");
  }

  if (special) {

    base = os::reserve_memory_special(size, alignment, requested_address, executable);

    if (base != NULL) {
      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
        // OS ignored requested address. Try different address.
        return;
      }
      // Check alignment constraints.
      assert((uintptr_t) base % alignment == 0,
             err_msg("Large pages returned a non-aligned address, base: "
                 PTR_FORMAT " alignment: " PTR_FORMAT,
                 base, (void*)(uintptr_t)alignment));
      _special = true;
    } else {
      // failed; try to reserve regular memory below
      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
        if (PrintCompressedOopsMode) {
          tty->cr();
          tty->print_cr("Reserve regular memory without large pages.");
        }
      }
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid mapping over something
    // important.  If the requested range is not available, NULL is returned.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // OS ignored requested address. Try different address.
        base = NULL;
      }
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints.
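    // E.g. (illustrative numbers): with alignment == 64K and a zero
    // noaccess_prefix, a base of 0x7f5e40001000 fails the check below, so the
    // mapping is released and re-reserved with os::reserve_memory_aligned(),
    // which typically over-reserves and trims to guarantee alignment.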
    if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
      // Base not aligned, retry.
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      // Make sure that size is aligned.
      size = align_size_up(size, alignment);
      base = os::reserve_memory_aligned(size, alignment);

      if (requested_address != 0 &&
          failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address. Return to the caller, who can take
        // remedial action (like trying again without a requested address).
        assert(_base == NULL, "should be");
        return;
      }
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = noaccess_prefix;

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == _alignment, "noaccess prefix wrong");

  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");
}


ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                             bool special, bool executable) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = 0;
  _special = special;
  _executable = executable;
}


ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
                                        bool split, bool realloc) {
  assert(partition_size <= size(), "partition failed");
  if (split) {
    os::split_reserved_memory(base(), size(), partition_size, realloc);
  }
  ReservedSpace result(base(), partition_size, alignment, special(),
                       executable());
  return result;
}


ReservedSpace
ReservedSpace::last_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base() + partition_size, size() - partition_size,
                       alignment, special(), executable());
  return result;
}


size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_size_up(size, os::vm_page_size());
}


size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_size_down(size, os::vm_page_size());
}


size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_size_up(size, os::vm_allocation_granularity());
}


size_t ReservedSpace::allocation_align_size_down(size_t size) {
  return align_size_down(size, os::vm_allocation_granularity());
}
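
// Worked example for the helpers above, assuming a 4K page size and a 64K
// allocation granularity (as on Windows): page_align_size_up(5000) == 8192,
// page_align_size_down(5000) == 4096, allocation_align_size_up(5000) == 65536,
// and allocation_align_size_down(5000) == 0.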


void ReservedSpace::release() {
  if (is_reserved()) {
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
    if (special()) {
      os::release_memory_special(real_base, real_size);
    } else {
      os::release_memory(real_base, real_size);
    }
    _base = NULL;
    _size = 0;
    _noaccess_prefix = 0;
    _special = false;
    _executable = false;
  }
}

void ReservedSpace::protect_noaccess_prefix(const size_t size) {

  // If there is no noaccess prefix, return.
  if (_noaccess_prefix == 0) return;

  assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
         "must be at least page size big");

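  // The prefix page(s) below the heap base are made inaccessible so that
  // decoding a compressed null (heap base + offset 0) traps, which is what
  // lets the VM rely on implicit null checks for compressed oops.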
  if (true
      WIN64_ONLY(&& !UseLargePages)
      AIX_ONLY(&& os::vm_page_size() != SIZE_64K)) {
    // Protect memory at the base of the allocated region.
    // If special, the page was committed (only matters on Windows).
    if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
                            _special)) {
      fatal("cannot protect protection page");
    }
    if (PrintCompressedOopsMode) {
      tty->cr();
      tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " SIZE_FORMAT " bytes", _base, _noaccess_prefix);
    }
    assert(Universe::narrow_oop_use_implicit_null_checks() == true, "not initialized?");
  } else {
    Universe::set_narrow_oop_use_implicit_null_checks(false);
  }

  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
         "must be exactly of required size and alignment");
}

ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
                                     bool large, char* requested_address) :
  ReservedSpace(size, alignment, large,
                requested_address,
                (UseCompressedOops && (requested_address == NULL || requested_address+size > (char*)OopEncodingHeapMax) ?
                 noaccess_prefix_size(alignment) : 0)) {

  if (base() != NULL) {
    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
  }

  // Only reserved space for the Java heap should have a noaccess_prefix
  // if using compressed oops.
  protect_noaccess_prefix(size);
}

size_t ReservedHeapSpace::noaccess_prefix_size(size_t alignment) {
  return lcm(os::vm_page_size(), alignment);
}
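
// Worked example: with a 4K page size and an 8M heap alignment,
// lcm(4K, 8M) == 8M, since the alignment is a multiple of the page size, so
// the noaccess prefix occupies one full alignment unit.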

// Reserve space for the code segment.  Same as the Java heap, except we mark
// this as executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     bool large) :
  ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
  MemTracker::record_virtual_memory_type((address)base(), mtCode);
}

// VirtualSpace

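// A minimal usage sketch (illustrative only; M is the megabyte constant from
// globalDefinitions.hpp):
//
//   ReservedSpace rs(2 * M);     // reserve 2M of address space
//   VirtualSpace vs;
//   if (vs.initialize(rs, M)) {  // commit the first 1M up front
//     vs.expand_by(M, false);    // commit the remaining 1M on demand
//   }
//   rs.release();                // unmap the whole range when done
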
VirtualSpace::VirtualSpace() {
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  const size_t max_commit_granularity = os::page_size_for_region(rs.size(), 1);
  return initialize_with_granularity(rs, committed_size, max_commit_granularity);
}

bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
  if (!rs.is_reserved()) return false; // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  assert(max_commit_granularity > 0, "Granularity must be non-zero.");

  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
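  //
  // For illustration, with 4K small pages and a 4M commit granularity the
  // reserved range is carved into three regions:
  //
  //   low_boundary()                                      high_boundary()
  //       |  lower  |             middle             |  upper  |
  //       +---------+--------------------------------+---------+
  //        4K pages   committed in 4M-aligned chunks   4K pages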
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = max_commit_granularity;
  _upper_alignment  = os::vm_page_size();

  // End of each region
  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // commit to initial size
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}


VirtualSpace::~VirtualSpace() {
  release();
}


void VirtualSpace::release() {
  // This does not release the underlying memory, which this VirtualSpace
  // never reserved itself; the owner must release it via rs.release().
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}


size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}


size_t VirtualSpace::uncommitted_size() const {
  return reserved_size() - committed_size();
}

size_t VirtualSpace::actual_committed_size() const {
  // Special VirtualSpaces commit all reserved space up front.
  if (special()) {
    return reserved_size();
  }

  size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
  size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
  size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));

#ifdef ASSERT
  size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
  size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
  size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));

  if (committed_high > 0) {
    assert(committed_low == lower, "Must be");
    assert(committed_middle == middle, "Must be");
  }

  if (committed_middle > 0) {
    assert(committed_low == lower, "Must be");
  }
  if (committed_middle < middle) {
    assert(committed_high == 0, "Must be");
  }

  if (committed_low < lower) {
    assert(committed_high == 0, "Must be");
    assert(committed_middle == 0, "Must be");
  }
#endif

  return committed_low + committed_middle + committed_high;
}

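// Note that the committed range is half-open: for a non-empty committed
// range, contains(low()) is true while contains(high()) is false.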
bool VirtualSpace::contains(const void* p) const {
  return low() <= (const char*) p && (const char*) p < high();
}

/*
   First we need to determine if a particular virtual space is using large
   pages.  This is done in the initialize function and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages.  Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks.  If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
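// For illustration (assuming a 4M middle alignment): if _middle_high already
// sits at a 4M boundary beyond high(), an expand_by() that stays below that
// boundary commits nothing new and only advances _high.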
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) return false;

  if (special()) {
    // don't commit memory if the entire space is pinned in memory
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(),
         "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =
    MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high =
    MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =
    MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on each region's alignment.  Lower and upper
  // alignment will always be the default page size.  Middle alignment will be
  // LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By getting the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra or inter region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs =
      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs =
      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs =
      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions
  if (lower_needs > 0) {
    assert(low_boundary() <= lower_high() &&
           lower_high() + lower_needs <= lower_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", lower_needs=" SIZE_FORMAT ", %d) failed",
                         lower_high(), lower_needs, _executable);)
      return false;
    } else {
      _lower_high += lower_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= middle_high() &&
           middle_high() + middle_needs <= middle_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
                           _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", middle_needs=" SIZE_FORMAT ", " SIZE_FORMAT
                         ", %d) failed", middle_high(), middle_needs,
                         middle_alignment(), _executable);)
      return false;
    }
    _middle_high += middle_needs;
  }
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= upper_high() &&
           upper_high() + upper_needs <= upper_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", upper_needs=" SIZE_FORMAT ", %d) failed",
                         upper_high(), upper_needs, _executable);)
      return false;
    } else {
      _upper_high += upper_needs;
    }
  }

  if (pre_touch || AlwaysPreTouch) {
    int vm_ps = os::vm_page_size();
    for (char* curr = previous_high;
         curr < unaligned_new_high;
         curr += vm_ps) {
      // Note the use of a write here; originally we tried just a read, but
      // since the value read was unused, the optimizer removed the read.
      // If we ever have a concurrent touchahead thread, we'll want to use
      // a read, to avoid the potential of overwriting data (if a mutator
      // thread beats the touchahead thread to a page).  There are various
      // ways of making sure this read is not optimized away: for example,
      // generating the code for a read procedure at runtime.
      *curr = 0;
    }
  }

  _high += bytes;
  return true;
}

// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary,
// at which point that page can be uncommitted.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // don't uncommit if the entire space is pinned in memory
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align address to region's alignment
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}

#ifndef PRODUCT
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}

void VirtualSpace::print_on(outputStream* out) {
  out->print   ("Virtual space:");
  if (special()) out->print(" (pinned in memory)");
  out->cr();
  out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
  out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
  out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
}

void VirtualSpace::print() {
  print_on(tty);
}

/////////////// Unit tests ///////////////

#ifndef PRODUCT

#define test_log(...) \
  do {\
    if (VerboseInternalVMTests) { \
      tty->print_cr(__VA_ARGS__); \
      tty->flush(); \
    }\
  } while (false)

class TestReservedSpace : AllStatic {
 public:
  static void small_page_write(void* addr, size_t size) {
    size_t page_size = os::vm_page_size();

    char* end = (char*)addr + size;
    for (char* p = (char*)addr; p < end; p += page_size) {
      *p = 1;
    }
  }

  static void release_memory_for_test(ReservedSpace rs) {
    if (rs.special()) {
      guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
    } else {
      guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
    }
  }

  static void test_reserved_space1(size_t size, size_t alignment) {
    test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);

    assert(is_size_aligned(size, alignment), "Incorrect input parameters");

    ReservedSpace rs(size,          // size
                     alignment,     // alignment
                     UseLargePages, // large
                     NULL,          // requested_address
                     0);            // noaccess_prefix

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
    assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }

  static void test_reserved_space2(size_t size) {
    test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);

    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");

    ReservedSpace rs(size);

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }

  static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
    test_log("test_reserved_space3(%p, %p, %d)",
        (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);

    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
    assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");

    bool large = maybe_large && UseLargePages && size >= os::large_page_size();

    ReservedSpace rs(size, alignment, large, /* executable */ false);

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }


  static void test_reserved_space1() {
    size_t size = 2 * 1024 * 1024;
    size_t ag   = os::vm_allocation_granularity();

    test_reserved_space1(size,      ag);
    test_reserved_space1(size * 2,  ag);
    test_reserved_space1(size * 10, ag);
  }

  static void test_reserved_space2() {
    size_t size = 2 * 1024 * 1024;
    size_t ag = os::vm_allocation_granularity();

    test_reserved_space2(size * 1);
    test_reserved_space2(size * 2);
    test_reserved_space2(size * 10);
    test_reserved_space2(ag);
    test_reserved_space2(size - ag);
    test_reserved_space2(size);
    test_reserved_space2(size + ag);
    test_reserved_space2(size * 2);
    test_reserved_space2(size * 2 - ag);
    test_reserved_space2(size * 2 + ag);
    test_reserved_space2(size * 3);
    test_reserved_space2(size * 3 - ag);
    test_reserved_space2(size * 3 + ag);
    test_reserved_space2(size * 10);
    test_reserved_space2(size * 10 + size / 2);
  }

  static void test_reserved_space3() {
    size_t ag = os::vm_allocation_granularity();

    test_reserved_space3(ag,      ag    , false);
    test_reserved_space3(ag * 2,  ag    , false);
    test_reserved_space3(ag * 3,  ag    , false);
    test_reserved_space3(ag * 2,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 2, false);
    test_reserved_space3(ag * 8,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 4, false);
    test_reserved_space3(ag * 8,  ag * 4, false);
    test_reserved_space3(ag * 16, ag * 4, false);

    if (UseLargePages) {
      size_t lp = os::large_page_size();

      // Without large pages
      test_reserved_space3(lp,     ag * 4, false);
      test_reserved_space3(lp * 2, ag * 4, false);
      test_reserved_space3(lp * 4, ag * 4, false);
      test_reserved_space3(lp,     lp    , false);
      test_reserved_space3(lp * 2, lp    , false);
      test_reserved_space3(lp * 3, lp    , false);
      test_reserved_space3(lp * 2, lp * 2, false);
      test_reserved_space3(lp * 4, lp * 2, false);
      test_reserved_space3(lp * 8, lp * 2, false);

      // With large pages
      test_reserved_space3(lp,     ag * 4, true);
      test_reserved_space3(lp * 2, ag * 4, true);
      test_reserved_space3(lp * 4, ag * 4, true);
      test_reserved_space3(lp,     lp    , true);
      test_reserved_space3(lp * 2, lp    , true);
      test_reserved_space3(lp * 3, lp    , true);
      test_reserved_space3(lp * 2, lp * 2, true);
      test_reserved_space3(lp * 4, lp * 2, true);
      test_reserved_space3(lp * 8, lp * 2, true);
    }
  }

  static void test_reserved_space() {
    test_reserved_space1();
    test_reserved_space2();
    test_reserved_space3();
  }
};

void TestReservedSpace_test() {
  TestReservedSpace::test_reserved_space();
}
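
// In debug builds these test entry points are typically driven by the
// internal VM test runner (e.g. -XX:+ExecuteInternalVMTests); they are not
// compiled into product builds.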

#define assert_equals(actual, expected)     \
  assert(actual == expected,                \
    err_msg("Got " SIZE_FORMAT " expected " \
      SIZE_FORMAT, actual, expected));

#define assert_ge(value1, value2)                  \
  assert(value1 >= value2,                         \
    err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
      #value2 "': " SIZE_FORMAT, value1, value2));

#define assert_lt(value1, value2)                  \
  assert(value1 < value2,                          \
    err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
      #value2 "': " SIZE_FORMAT, value1, value2));


class TestVirtualSpace : AllStatic {
  enum TestLargePages {
    Default,
    Disable,
    Reserve,
    Commit
  };

  static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
    switch (mode) {
    default:
    case Default:
    case Reserve:
      return ReservedSpace(reserve_size_aligned);
    case Disable:
    case Commit:
      return ReservedSpace(reserve_size_aligned,
                           os::vm_allocation_granularity(),
                           /* large */ false, /* exec */ false);
    }
  }

  static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
    switch (mode) {
    default:
    case Default:
    case Reserve:
      return vs.initialize(rs, 0);
    case Disable:
      return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
    case Commit:
      return vs.initialize_with_granularity(rs, 0, os::page_size_for_region(rs.size(), 1));
    }
  }

 public:
  static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
                                                        TestLargePages mode = Default) {
    size_t granularity = os::vm_allocation_granularity();
    size_t reserve_size_aligned = align_size_up(reserve_size, granularity);

    ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);

    assert(reserved.is_reserved(), "Must be");

    VirtualSpace vs;
    bool initialized = initialize_virtual_space(vs, reserved, mode);
    assert(initialized, "Failed to initialize VirtualSpace");

    vs.expand_by(commit_size, false);

    if (vs.special()) {
      assert_equals(vs.actual_committed_size(), reserve_size_aligned);
    } else {
      assert_ge(vs.actual_committed_size(), commit_size);
      // Approximate the commit granularity.
      // Make sure that we don't commit using large pages
      // if large pages have been disabled for this VirtualSpace.
      size_t commit_granularity = (mode == Disable || !UseLargePages) ?
                                   os::vm_page_size() : os::large_page_size();
      assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
    }

    reserved.release();
  }

  static void test_virtual_space_actual_committed_space_one_large_page() {
    if (!UseLargePages) {
      return;
    }

    size_t large_page_size = os::large_page_size();

    ReservedSpace reserved(large_page_size, large_page_size, true, false);

    assert(reserved.is_reserved(), "Must be");

    VirtualSpace vs;
    bool initialized = vs.initialize(reserved, 0);
    assert(initialized, "Failed to initialize VirtualSpace");

    vs.expand_by(large_page_size, false);

    assert_equals(vs.actual_committed_size(), large_page_size);

    reserved.release();
  }

  static void test_virtual_space_actual_committed_space() {
    test_virtual_space_actual_committed_space(4 * K, 0);
    test_virtual_space_actual_committed_space(4 * K, 4 * K);
    test_virtual_space_actual_committed_space(8 * K, 0);
    test_virtual_space_actual_committed_space(8 * K, 4 * K);
    test_virtual_space_actual_committed_space(8 * K, 8 * K);
    test_virtual_space_actual_committed_space(12 * K, 0);
    test_virtual_space_actual_committed_space(12 * K, 4 * K);
    test_virtual_space_actual_committed_space(12 * K, 8 * K);
    test_virtual_space_actual_committed_space(12 * K, 12 * K);
    test_virtual_space_actual_committed_space(64 * K, 0);
    test_virtual_space_actual_committed_space(64 * K, 32 * K);
    test_virtual_space_actual_committed_space(64 * K, 64 * K);
    test_virtual_space_actual_committed_space(2 * M, 0);
    test_virtual_space_actual_committed_space(2 * M, 4 * K);
    test_virtual_space_actual_committed_space(2 * M, 64 * K);
    test_virtual_space_actual_committed_space(2 * M, 1 * M);
    test_virtual_space_actual_committed_space(2 * M, 2 * M);
    test_virtual_space_actual_committed_space(10 * M, 0);
    test_virtual_space_actual_committed_space(10 * M, 4 * K);
    test_virtual_space_actual_committed_space(10 * M, 8 * K);
    test_virtual_space_actual_committed_space(10 * M, 1 * M);
    test_virtual_space_actual_committed_space(10 * M, 2 * M);
    test_virtual_space_actual_committed_space(10 * M, 5 * M);
    test_virtual_space_actual_committed_space(10 * M, 10 * M);
  }

  static void test_virtual_space_disable_large_pages() {
    if (!UseLargePages) {
      return;
    }
    // These test cases verify committed-size accounting when VirtualSpace is
    // forced to disable large pages, and in the Reserve and Commit
    // granularity modes.
    test_virtual_space_actual_committed_space(10 * M, 0, Disable);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);

    test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);

    test_virtual_space_actual_committed_space(10 * M, 0, Commit);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
  }

  static void test_virtual_space() {
    test_virtual_space_actual_committed_space();
    test_virtual_space_actual_committed_space_one_large_page();
    test_virtual_space_disable_large_pages();
  }
};

void TestVirtualSpace_test() {
  TestVirtualSpace::test_virtual_space();
}

#endif // PRODUCT

#endif