/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/virtualspace.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif


// ReservedSpace
ReservedSpace::ReservedSpace(size_t size) {
  initialize(size, 0, false, NULL, 0, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             char* requested_address,
                             const size_t noaccess_prefix) {
  initialize(size+noaccess_prefix, alignment, large, requested_address,
             noaccess_prefix, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             bool executable) {
  initialize(size, alignment, large, NULL, 0, executable);
}

char *
ReservedSpace::align_reserved_region(char* addr, const size_t len,
                                     const size_t prefix_size,
                                     const size_t prefix_align,
                                     const size_t suffix_size,
                                     const size_t suffix_align)
{
  assert(addr != NULL, "sanity");
  const size_t required_size = prefix_size + suffix_size;
  assert(len >= required_size, "len too small");

  const size_t s = size_t(addr);
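  // beg_ofs is how far (addr + prefix_size) lies past the previous
  // suffix_align boundary (suffix_align is a power of 2); beg_delta is how
  // far addr must be advanced so that the suffix starts on a suffix_align
  // boundary.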
  const size_t beg_ofs = (s + prefix_size) & (suffix_align - 1);
  const size_t beg_delta = beg_ofs == 0 ? 0 : suffix_align - beg_ofs;

  if (len < beg_delta + required_size) {
    return NULL; // Cannot do proper alignment.
  }
  const size_t end_delta = len - (beg_delta + required_size);

  if (beg_delta != 0) {
    os::release_memory(addr, beg_delta);
  }

  if (end_delta != 0) {
    char* release_addr = (char*) (s + beg_delta + required_size);
    os::release_memory(release_addr, end_delta);
  }

  return (char*) (s + beg_delta);
}

char* ReservedSpace::reserve_and_align(const size_t reserve_size,
                                       const size_t prefix_size,
                                       const size_t prefix_align,
                                       const size_t suffix_size,
                                       const size_t suffix_align)
{
  assert(reserve_size > prefix_size + suffix_size, "should not be here");

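  // Reserve a region larger than actually needed, then carve out an interior
  // block that satisfies both alignments by releasing the unneeded head and
  // tail in align_reserved_region() below.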
  char* raw_addr = os::reserve_memory(reserve_size, NULL, prefix_align);
  if (raw_addr == NULL) return NULL;

  char* result = align_reserved_region(raw_addr, reserve_size, prefix_size,
                                       prefix_align, suffix_size,
                                       suffix_align);
  if (result == NULL && !os::release_memory(raw_addr, reserve_size)) {
    fatal("os::release_memory failed");
  }

#ifdef ASSERT
  if (result != NULL) {
    const size_t raw = size_t(raw_addr);
    const size_t res = size_t(result);
    assert(res >= raw, "alignment decreased start addr");
    assert(res + prefix_size + suffix_size <= raw + reserve_size,
           "alignment increased end addr");
    assert((res & (prefix_align - 1)) == 0, "bad alignment of prefix");
    assert(((res + prefix_size) & (suffix_align - 1)) == 0,
           "bad alignment of suffix");
  }
#endif

  return result;
}

// Helper method.
static bool failed_to_reserve_as_requested(char* base, char* requested_address,
                                           const size_t size, bool special)
{
  if (base == requested_address || requested_address == NULL)
    return false; // did not fail

  if (base != NULL) {
    // A different reserve address may be acceptable in other cases, but for
    // compressed oops the heap should be at the requested address.
    assert(UseCompressedOops, "currently requested address used only for compressed oops");
    if (PrintCompressedOopsMode) {
      tty->cr();
      tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
    }
    // OS ignored requested address. Try different address.
    if (special) {
      if (!os::release_memory_special(base, size)) {
        fatal("os::release_memory_special failed");
      }
    } else {
      if (!os::release_memory(base, size)) {
        fatal("os::release_memory failed");
      }
    }
  }
  return true;
}

ReservedSpace::ReservedSpace(const size_t prefix_size,
                             const size_t prefix_align,
                             const size_t suffix_size,
                             const size_t suffix_align,
                             char* requested_address,
                             const size_t noaccess_prefix)
{
  assert(prefix_size != 0, "sanity");
  assert(prefix_align != 0, "sanity");
  assert(suffix_size != 0, "sanity");
  assert(suffix_align != 0, "sanity");
  assert((prefix_size & (prefix_align - 1)) == 0,
    "prefix_size not divisible by prefix_align");
  assert((suffix_size & (suffix_align - 1)) == 0,
    "suffix_size not divisible by suffix_align");
  assert((suffix_align & (prefix_align - 1)) == 0,
    "suffix_align not divisible by prefix_align");

  // Assert that if noaccess_prefix is used, it is the same as prefix_align.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == prefix_align, "noaccess prefix wrong");

  // Add in noaccess_prefix to prefix_size.
  const size_t adjusted_prefix_size = prefix_size + noaccess_prefix;
  const size_t size = adjusted_prefix_size + suffix_size;
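  // The reserved region is laid out as [noaccess prefix][prefix][suffix]; the
  // adjusted prefix must end on a suffix_align boundary so that the suffix
  // starts properly aligned.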

  // On systems where the entire region has to be reserved and committed up
  // front, the compound alignment normally done by this method is unnecessary.
  const bool try_reserve_special = UseLargePages &&
    prefix_align == os::large_page_size();
  if (!os::can_commit_large_page_memory() && try_reserve_special) {
    initialize(size, prefix_align, true, requested_address, noaccess_prefix,
               false);
    return;
  }

  _base = NULL;
  _size = 0;
  _alignment = 0;
  _special = false;
  _noaccess_prefix = 0;
  _executable = false;

  // Optimistically try to reserve the exact size needed.
  char* addr;
  if (requested_address != 0) {
    requested_address -= noaccess_prefix; // adjust address
    assert(requested_address != NULL, "huge noaccess prefix?");
    addr = os::attempt_reserve_memory_at(size, requested_address);
    if (failed_to_reserve_as_requested(addr, requested_address, size, false)) {
      // OS ignored requested address. Try different address.
      addr = NULL;
    }
  } else {
    addr = os::reserve_memory(size, NULL, prefix_align);
  }
  if (addr == NULL) return;

  // Check whether the result has the needed alignment (unlikely unless
  // prefix_align < suffix_align).
  const size_t ofs = (size_t(addr) + adjusted_prefix_size) & (suffix_align - 1);
  if (ofs != 0) {
    // Wrong alignment.  Release, allocate more space and do manual alignment.
    //
    // On most operating systems, another allocation with a somewhat larger size
    // will return an address "close to" that of the previous allocation.  The
    // result is often the same address (if the kernel hands out virtual
    // addresses from low to high), or an address that is offset by the increase
    // in size.  Exploit that to minimize the amount of extra space requested.
    if (!os::release_memory(addr, size)) {
      fatal("os::release_memory failed");
    }

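    // Request enough extra space to fix the misalignment whichever way the
    // new reservation ends up shifted relative to the old one.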
    const size_t extra = MAX2(ofs, suffix_align - ofs);
    addr = reserve_and_align(size + extra, adjusted_prefix_size, prefix_align,
                             suffix_size, suffix_align);
    if (addr == NULL) {
      // Try an even larger region.  If this fails, address space is exhausted.
      addr = reserve_and_align(size + suffix_align, adjusted_prefix_size,
                               prefix_align, suffix_size, suffix_align);
    }

    if (requested_address != 0 &&
        failed_to_reserve_as_requested(addr, requested_address, size, false)) {
      // As a result of the alignment constraints, the allocated addr differs
      // from the requested address. Return back to the caller who can
      // take remedial action (like try again without a requested address).
      assert(_base == NULL, "should be");
      return;
    }
  }

  _base = addr;
  _size = size;
  _alignment = prefix_align;
  _noaccess_prefix = noaccess_prefix;
}

void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               const size_t noaccess_prefix,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  alignment = MAX2(alignment, (size_t)os::vm_page_size());

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == alignment, "noaccess prefix wrong");

  _base = NULL;
  _size = 0;
  _special = false;
  _executable = executable;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }

  // If the OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  if (requested_address != 0) {
    requested_address -= noaccess_prefix; // adjust requested address
    assert(requested_address != NULL, "huge noaccess prefix?");
  }

  if (special) {

    base = os::reserve_memory_special(size, requested_address, executable);

    if (base != NULL) {
      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
        // OS ignored requested address. Try different address.
        return;
      }
      // Check alignment constraints
      assert((uintptr_t) base % alignment == 0,
             "Large pages returned a non-aligned address");
      _special = true;
    } else {
      // failed; try to reserve regular memory below
      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
        if (PrintCompressedOopsMode) {
          tty->cr();
          tty->print_cr("Reserve regular memory without large pages.");
        }
      }
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid over mapping something
    // important.  If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // OS ignored requested address. Try different address.
        base = NULL;
      }
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints
    if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
      // Base not aligned, retry
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      // Increase size to a multiple of the desired alignment, then reserve
      // enough extra space to do the manual alignment below.
      size = align_size_up(size, alignment);
      size_t extra_size = size + alignment;
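      // Over-reserving by one alignment unit guarantees that an aligned base
      // followed by the full (aligned) size fits inside the extra region.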
      do {
        char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
        if (extra_base == NULL) return;
        // Do manual alignment
        base = (char*) align_size_up((uintptr_t) extra_base, alignment);
        assert(base >= extra_base, "just checking");
        // Re-reserve the region at the aligned base address.
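        // If the re-reservation at that address fails (the range may have
        // been claimed by another thread in the window between the release
        // and the reserve), loop and retry with a fresh over-sized
        // reservation.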
        os::release_memory(extra_base, extra_size);
        base = os::reserve_memory(size, base);
      } while (base == NULL);

      if (requested_address != 0 &&
          failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address. Return back to the caller who can
        // take remedial action (like try again without a requested address).
        assert(_base == NULL, "should be");
        return;
      }
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = noaccess_prefix;

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == _alignment, "noaccess prefix wrong");

  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");
}


ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                             bool special, bool executable) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = 0;
  _special = special;
  _executable = executable;
}


ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
                                        bool split, bool realloc) {
  assert(partition_size <= size(), "partition failed");
  if (split) {
    os::split_reserved_memory(base(), size(), partition_size, realloc);
  }
  ReservedSpace result(base(), partition_size, alignment, special(),
                       executable());
  return result;
}


ReservedSpace
ReservedSpace::last_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base() + partition_size, size() - partition_size,
                       alignment, special(), executable());
  return result;
}


size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_size_up(size, os::vm_page_size());
}


size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_size_down(size, os::vm_page_size());
}


size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_size_up(size, os::vm_allocation_granularity());
}


size_t ReservedSpace::allocation_align_size_down(size_t size) {
  return align_size_down(size, os::vm_allocation_granularity());
}


void ReservedSpace::release() {
  if (is_reserved()) {
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
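    // The noaccess prefix was part of the original reservation, so it must be
    // included when the memory is handed back to the OS.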
    if (special()) {
      os::release_memory_special(real_base, real_size);
    } else {
      os::release_memory(real_base, real_size);
    }
    _base = NULL;
    _size = 0;
    _noaccess_prefix = 0;
    _special = false;
    _executable = false;
  }
}

void ReservedSpace::protect_noaccess_prefix(const size_t size) {
  assert( (_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
                                      (size_t(_base + _size) > OopEncodingHeapMax) &&
                                      Universe::narrow_oop_use_implicit_null_checks()),
         "noaccess_prefix should be used only with non zero based compressed oops");

  // If there is no noaccess prefix, return.
  if (_noaccess_prefix == 0) return;

  assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
         "must be at least page size big");

  // Protect memory at the base of the allocated region.
  // If special, the page was committed (only matters on Windows).
  if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
                          _special)) {
    fatal("cannot protect protection page");
  }
  if (PrintCompressedOopsMode) {
    tty->cr();
    tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
  }

  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
         "must be exactly of required size and alignment");
}

ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
                                     bool large, char* requested_address) :
  ReservedSpace(size, alignment, large,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), alignment) : 0) {
  // Only reserved space for the java heap should have a noaccess_prefix
  // if using compressed oops.
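  // With a non-NULL narrow oop base, a narrow oop of 0 decodes to the heap
  // base itself; keeping a protected page there makes such accesses fault,
  // so implicit null checks keep working for compressed oops.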
  protect_noaccess_prefix(size);
}

ReservedHeapSpace::ReservedHeapSpace(const size_t prefix_size,
                                     const size_t prefix_align,
                                     const size_t suffix_size,
                                     const size_t suffix_align,
                                     char* requested_address) :
  ReservedSpace(prefix_size, prefix_align, suffix_size, suffix_align,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), prefix_align) : 0) {
  protect_noaccess_prefix(prefix_size+suffix_size);
}

// Reserve space for code segment.  Same as Java heap only we mark this as
// executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     bool large) :
  ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
}

// VirtualSpace

VirtualSpace::VirtualSpace() {
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  if(!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = os::page_size_for_region(rs.size(), rs.size(), 1);
  _upper_alignment  = os::vm_page_size();

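  // The space is treated as three consecutive regions: a lower and an upper
  // region managed at the default page size, and a middle region (the bulk
  // of the space) managed at the coarser middle_alignment granularity.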
  // End of each region
  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // commit to initial size
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}


VirtualSpace::~VirtualSpace() {
  release();
}


void VirtualSpace::release() {
  // This does not release memory it never reserved.
  // Caller must release via rs.release();
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}


size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}


size_t VirtualSpace::uncommitted_size()  const {
  return reserved_size() - committed_size();
}


bool VirtualSpace::contains(const void* p) const {
  return low() <= (const char*) p && (const char*) p < high();
}

/*
   First we need to determine if a particular virtual space is using large
   pages.  This is done in the initialize function and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages.  Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks.  If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) return false;

  if (special()) {
    // don't commit memory if the entire space is pinned in memory
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(),
         "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =
    MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high =
    MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =
    MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on each region's alignment.  Lower and upper
  // alignment will always be the default page size.  Middle alignment will be
  // LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By getting the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra or inter region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs =
      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs =
      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs =
      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions
  if (lower_needs > 0) {
    assert(low_boundary() <= lower_high() &&
           lower_high() + lower_needs <= lower_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    } else {
      _lower_high += lower_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= middle_high() &&
           middle_high() + middle_needs <= middle_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
                           _executable)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    }
    _middle_high += middle_needs;
  }
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= upper_high() &&
           upper_high() + upper_needs <= upper_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    } else {
      _upper_high += upper_needs;
    }
  }

  if (pre_touch || AlwaysPreTouch) {
    int vm_ps = os::vm_page_size();
    for (char* curr = previous_high;
         curr < unaligned_new_high;
         curr += vm_ps) {
      // Note the use of a write here; originally we tried just a read, but
      // since the value read was unused, the optimizer removed the read.
      // If we ever have a concurrent touchahead thread, we'll want to use
      // a read, to avoid the potential of overwriting data (if a mutator
      // thread beats the touchahead thread to a page).  There are various
      // ways of making sure this read is not optimized away: for example,
      // generating the code for a read procedure at runtime.
      *curr = 0;
    }
  }

  _high += bytes;
  return true;
}

// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary
// in which case that particular page can now be uncommitted.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // don't uncommit if the entire space is pinned in memory
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align address to region's alignment
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}

#ifndef PRODUCT
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}

void VirtualSpace::print() {
  tty->print("Virtual space:");
  if (special()) tty->print(" (pinned in memory)");
  tty->cr();
  tty->print_cr(" - committed: %ld", committed_size());
  tty->print_cr(" - reserved:  %ld", reserved_size());
  tty->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
  tty->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
}

#endif