/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif


// ReservedSpace
ReservedSpace::ReservedSpace(size_t size) {
  initialize(size, 0, false, NULL, 0, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             char* requested_address,
                             const size_t noaccess_prefix) {
  initialize(size + noaccess_prefix, alignment, large, requested_address,
             noaccess_prefix, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             bool executable) {
  initialize(size, alignment, large, NULL, 0, executable);
}

char *
ReservedSpace::align_reserved_region(char* addr, const size_t len,
                                     const size_t prefix_size,
                                     const size_t prefix_align,
                                     const size_t suffix_size,
                                     const size_t suffix_align)
{
  assert(addr != NULL, "sanity");
  const size_t required_size = prefix_size + suffix_size;
  assert(len >= required_size, "len too small");

  const size_t s = size_t(addr);
  const size_t beg_ofs = (s + prefix_size) & (suffix_align - 1);
  const size_t beg_delta = beg_ofs == 0 ? 0 : suffix_align - beg_ofs;

  if (len < beg_delta + required_size) {
    return NULL; // Cannot do proper alignment.
  }
  const size_t end_delta = len - (beg_delta + required_size);

  if (beg_delta != 0) {
    os::release_memory(addr, beg_delta);
  }

  if (end_delta != 0) {
    char* release_addr = (char*) (s + beg_delta + required_size);
    os::release_memory(release_addr, end_delta);
  }

  return (char*) (s + beg_delta);
}
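
// A worked example of the arithmetic above (illustrative only; the
// numbers are hypothetical): with prefix_size = 0x4000 and
// suffix_align = 0x10000, a raw addr of 0x10700000 gives
//   beg_ofs   = (0x10700000 + 0x4000) & 0xffff = 0x4000
//   beg_delta = 0x10000 - 0x4000               = 0xc000
// so 0xc000 bytes are released at the front, the prefix starts at
// 0x1070c000, the suffix lands on the 64K-aligned 0x10710000, and any
// slack beyond prefix_size + suffix_size is released at the back.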

char* ReservedSpace::reserve_and_align(const size_t reserve_size,
                                       const size_t prefix_size,
                                       const size_t prefix_align,
                                       const size_t suffix_size,
                                       const size_t suffix_align)
{
  assert(reserve_size > prefix_size + suffix_size, "should not be here");

  char* raw_addr = os::reserve_memory(reserve_size, NULL, prefix_align);
  if (raw_addr == NULL) return NULL;

  char* result = align_reserved_region(raw_addr, reserve_size, prefix_size,
                                       prefix_align, suffix_size,
                                       suffix_align);
  if (result == NULL && !os::release_memory(raw_addr, reserve_size)) {
    fatal("os::release_memory failed");
  }

#ifdef ASSERT
  if (result != NULL) {
    const size_t raw = size_t(raw_addr);
    const size_t res = size_t(result);
    assert(res >= raw, "alignment decreased start addr");
    assert(res + prefix_size + suffix_size <= raw + reserve_size,
           "alignment increased end addr");
    assert((res & (prefix_align - 1)) == 0, "bad alignment of prefix");
    assert(((res + prefix_size) & (suffix_align - 1)) == 0,
           "bad alignment of suffix");
  }
#endif

  return result;
}
 128 
 129 // Helper method.
 130 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
 131                                            const size_t size, bool special)
 132 {
 133   if (base == requested_address || requested_address == NULL)
 134     return false; // did not fail
 135 
 136   if (base != NULL) {
 137     // Different reserve address may be acceptable in other cases
 138     // but for compressed oops heap should be at requested address.
 139     assert(UseCompressedOops, "currently requested address used only for compressed oops");
 140     if (PrintCompressedOopsMode) {
 141       tty->cr();
 142       tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
 143     }
 144     // OS ignored requested address. Try different address.
 145     if (special) {
 146       if (!os::release_memory_special(base, size)) {
 147         fatal("os::release_memory_special failed");
 148       }
 149     } else {
 150       if (!os::release_memory(base, size)) {
 151         fatal("os::release_memory failed");
 152       }
 153     }
 154   }
 155   return true;
 156 }

ReservedSpace::ReservedSpace(const size_t prefix_size,
                             const size_t prefix_align,
                             const size_t suffix_size,
                             const size_t suffix_align,
                             char* requested_address,
                             const size_t noaccess_prefix)
{
  assert(prefix_size != 0, "sanity");
  assert(prefix_align != 0, "sanity");
  assert(suffix_size != 0, "sanity");
  assert(suffix_align != 0, "sanity");
  assert((prefix_size & (prefix_align - 1)) == 0,
    "prefix_size not divisible by prefix_align");
  assert((suffix_size & (suffix_align - 1)) == 0,
    "suffix_size not divisible by suffix_align");
  assert((suffix_align & (prefix_align - 1)) == 0,
    "suffix_align not divisible by prefix_align");

  // Assert that if noaccess_prefix is used, it is the same as prefix_align.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == prefix_align, "noaccess prefix wrong");

  // Add in noaccess_prefix to prefix_size.
  const size_t adjusted_prefix_size = prefix_size + noaccess_prefix;
  const size_t size = adjusted_prefix_size + suffix_size;

  // On systems where the entire region has to be reserved and committed up
  // front, the compound alignment normally done by this method is unnecessary.
  const bool try_reserve_special = UseLargePages &&
    prefix_align == os::large_page_size();
  if (!os::can_commit_large_page_memory() && try_reserve_special) {
    initialize(size, prefix_align, true, requested_address, noaccess_prefix,
               false);
    return;
  }

  _base = NULL;
  _size = 0;
  _alignment = 0;
  _special = false;
  _noaccess_prefix = 0;
  _executable = false;

  // Optimistically try to reserve the exact size needed.
  char* addr;
  if (requested_address != 0) {
    requested_address -= noaccess_prefix; // adjust address
    assert(requested_address != NULL, "huge noaccess prefix?");
    addr = os::attempt_reserve_memory_at(size, requested_address);
    if (failed_to_reserve_as_requested(addr, requested_address, size, false)) {
      // OS ignored requested address. Try different address.
      addr = NULL;
    }
  } else {
    addr = os::reserve_memory(size, NULL, prefix_align);
  }
  if (addr == NULL) return;

  // Check whether the result has the needed alignment (a mismatch is
  // unlikely unless prefix_align < suffix_align).
  const size_t ofs = (size_t(addr) + adjusted_prefix_size) & (suffix_align - 1);
  if (ofs != 0) {
    // Wrong alignment.  Release, allocate more space and do manual alignment.
    //
    // On most operating systems, another allocation with a somewhat larger size
    // will return an address "close to" that of the previous allocation.  The
    // result is often the same address (if the kernel hands out virtual
    // addresses from low to high), or an address that is offset by the increase
    // in size.  Exploit that to minimize the amount of extra space requested.
    if (!os::release_memory(addr, size)) {
      fatal("os::release_memory failed");
    }

    const size_t extra = MAX2(ofs, suffix_align - ofs);
    addr = reserve_and_align(size + extra, adjusted_prefix_size, prefix_align,
                             suffix_size, suffix_align);
    if (addr == NULL) {
      // Try an even larger region.  If this fails, address space is exhausted.
      addr = reserve_and_align(size + suffix_align, adjusted_prefix_size,
                               prefix_align, suffix_size, suffix_align);
    }

    if (requested_address != 0 &&
        failed_to_reserve_as_requested(addr, requested_address, size, false)) {
      // As a result of the alignment constraints, the allocated addr differs
      // from the requested address. Return to the caller, who can
      // take remedial action (like trying again without a requested address).
      assert(_base == NULL, "should be");
      return;
    }
  }

  _base = addr;
  _size = size;
  _alignment = prefix_align;
  _noaccess_prefix = noaccess_prefix;
}
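
// Note on the retry sizing above (an observation, not a guarantee): if
// the second reservation comes back at the same base, suffix_align - ofs
// extra bytes let align_reserved_region() slide the prefix forward to
// the next suffix_align boundary; if it comes back shifted down by the
// size increase, ofs extra bytes restore the original alignment.  MAX2
// of the two covers both cases, and the fallback reservation with a full
// suffix_align of slack always contains an aligned candidate.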

void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               const size_t noaccess_prefix,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  alignment = MAX2(alignment, (size_t)os::vm_page_size());

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == alignment, "noaccess prefix wrong");

  _base = NULL;
  _size = 0;
  _special = false;
  _executable = executable;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }

  // If the OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  if (requested_address != 0) {
    requested_address -= noaccess_prefix; // adjust requested address
    assert(requested_address != NULL, "huge noaccess prefix?");
  }

  if (special) {

    base = os::reserve_memory_special(size, requested_address, executable);

    if (base != NULL) {
      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
        // OS ignored requested address. Try different address.
        return;
      }
      // Check alignment constraints.
      assert((uintptr_t) base % alignment == 0,
             "Large pages returned a non-aligned address");
      _special = true;
    } else {
      // Failed; try to reserve regular memory below.
      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
        if (PrintCompressedOopsMode) {
          tty->cr();
          tty->print_cr("Reserve regular memory without large pages.");
        }
      }
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid mapping over something
    // important.  If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // OS ignored requested address. Try different address.
        base = NULL;
      }
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints.
    if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
      // Base not aligned; retry.
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      // Make sure that size is aligned.
      size = align_size_up(size, alignment);
      base = os::reserve_memory_aligned(size, alignment);

      if (requested_address != 0 &&
          failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address. Return to the caller, who can
        // take remedial action (like trying again without a requested address).
        assert(_base == NULL, "should be");
        return;
      }
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = noaccess_prefix;

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == _alignment, "noaccess prefix wrong");

  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");
}


ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                             bool special, bool executable) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = 0;
  _special = special;
  _executable = executable;
}


ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
                                        bool split, bool realloc) {
  assert(partition_size <= size(), "partition failed");
  if (split) {
    os::split_reserved_memory(base(), size(), partition_size, realloc);
  }
  ReservedSpace result(base(), partition_size, alignment, special(),
                       executable());
  return result;
}


ReservedSpace
ReservedSpace::last_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base() + partition_size, size() - partition_size,
                       alignment, special(), executable());
  return result;
}


size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_size_up(size, os::vm_page_size());
}


size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_size_down(size, os::vm_page_size());
}


size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_size_up(size, os::vm_allocation_granularity());
}


size_t ReservedSpace::allocation_align_size_down(size_t size) {
  return align_size_down(size, os::vm_allocation_granularity());
}
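
// Illustrative only: assuming a 4K page size and a 64K allocation
// granularity (typical Windows values; on most Unix variants the
// granularity equals the page size), the helpers above round as follows:
//   page_align_size_up(5000)         == 8192
//   page_align_size_down(5000)       == 4096
//   allocation_align_size_up(5000)   == 65536
//   allocation_align_size_down(5000) == 0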


void ReservedSpace::release() {
  if (is_reserved()) {
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
    if (special()) {
      os::release_memory_special(real_base, real_size);
    } else {
      os::release_memory(real_base, real_size);
    }
    _base = NULL;
    _size = 0;
    _noaccess_prefix = 0;
    _special = false;
    _executable = false;
  }
}

void ReservedSpace::protect_noaccess_prefix(const size_t size) {
  assert((_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
                                     (Universe::narrow_oop_base() != NULL) &&
                                     Universe::narrow_oop_use_implicit_null_checks()),
         "noaccess_prefix should be used only with non zero based compressed oops");

  // If there is no noaccess prefix, return.
  if (_noaccess_prefix == 0) return;

  assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
         "must be at least page size big");

  // Protect memory at the base of the allocated region.
  // If special, the page was committed (only matters on Windows).
  if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
                          _special)) {
    fatal("cannot protect protection page");
  }
  if (PrintCompressedOopsMode) {
    tty->cr();
    tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " SIZE_FORMAT " bytes", _base, _noaccess_prefix);
  }

  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
         "must be exactly of required size and alignment");
}
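
// Layout sketch for the compressed-oops noaccess prefix (illustrative;
// the proportions are not to scale).  Before protect_noaccess_prefix():
//
//   _base                                        _base + _size
//   |<-- _noaccess_prefix -->|<------ heap ------>|
//
// Afterwards the prefix pages are MEM_PROT_NONE and _base/_size describe
// only the usable heap, so decoding a compressed NULL oop by adding the
// heap base faults in the protected page instead of touching the heap.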

ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
                                     bool large, char* requested_address) :
  ReservedSpace(size, alignment, large,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), alignment) : 0) {
  if (base() > 0) {
    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
  }

  // Only reserved space for the Java heap should have a noaccess_prefix
  // if using compressed oops.
  protect_noaccess_prefix(size);
}

ReservedHeapSpace::ReservedHeapSpace(const size_t prefix_size,
                                     const size_t prefix_align,
                                     const size_t suffix_size,
                                     const size_t suffix_align,
                                     char* requested_address) :
  ReservedSpace(prefix_size, prefix_align, suffix_size, suffix_align,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), prefix_align) : 0) {
  if (base() > 0) {
    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
  }

  protect_noaccess_prefix(prefix_size + suffix_size);
}

// Reserve space for the code segment.  Same as the Java heap, except we
// mark this as executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     bool large) :
  ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
  MemTracker::record_virtual_memory_type((address)base(), mtCode);
}

// VirtualSpace

VirtualSpace::VirtualSpace() {
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  if (!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = os::page_size_for_region(rs.size(), rs.size(), 1);
  _upper_alignment  = os::vm_page_size();

  // End of each region.
  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region.
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // Commit to initial size.
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}
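
// Sketch of the three commit regions set up above (illustrative; assumes
// the reserved space is larger than one large page, so middle_alignment
// is the large page size while the outer regions stay page-aligned):
//
//   low_boundary()                                        high_boundary()
//   |-- lower --|------------ middle ------------|-- upper --|
//               ^                                ^
//     lower_high_boundary()            middle_high_boundary()
//     (first large-page boundary)      (last large-page boundary)
//
// Commits in the middle region are done in large-page chunks; the lower
// and upper fringes are committed with default pages.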


VirtualSpace::~VirtualSpace() {
  release();
}


void VirtualSpace::release() {
  // This does not release the underlying memory: the VirtualSpace never
  // reserved it.  The caller must release it via rs.release().
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}


size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}


size_t VirtualSpace::uncommitted_size() const {
  return reserved_size() - committed_size();
}


bool VirtualSpace::contains(const void* p) const {
  return low() <= (const char*) p && (const char*) p < high();
}

/*
   First we need to determine if a particular virtual space is using large
   pages.  This is done in the initialize function; only virtual spaces
   that are larger than LargePageSizeInBytes use large pages.  Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks.  If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading up to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) return false;

  if (special()) {
    // Don't commit memory if the entire space is pinned in memory.
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(),
         "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =
    MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high =
    MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =
    MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on the region's alignment.  Lower and upper
  // alignment will always be the default page size.  Middle alignment will
  // be LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By getting the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra or inter region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs =
      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs =
      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs =
      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions.
  if (lower_needs > 0) {
    assert(low_boundary() <= lower_high() &&
           lower_high() + lower_needs <= lower_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    } else {
      _lower_high += lower_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= middle_high() &&
           middle_high() + middle_needs <= middle_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
                           _executable)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    }
    _middle_high += middle_needs;
  }
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= upper_high() &&
           upper_high() + upper_needs <= upper_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    } else {
      _upper_high += upper_needs;
    }
  }

  if (pre_touch || AlwaysPreTouch) {
    int vm_ps = os::vm_page_size();
    for (char* curr = previous_high;
         curr < unaligned_new_high;
         curr += vm_ps) {
      // Note the use of a write here; originally we tried just a read, but
      // since the value read was unused, the optimizer removed the read.
      // If we ever have a concurrent touchahead thread, we'll want to use
      // a read, to avoid the potential of overwriting data (if a mutator
      // thread beats the touchahead thread to a page).  There are various
      // ways of making sure this read is not optimized away: for example,
      // generating the code for a read procedure at runtime.
      *curr = 0;
    }
  }

  _high += bytes;
  return true;
}
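
// A worked example of the region split above (numbers are hypothetical):
// with a 4K page size, a 4M middle_alignment, low_boundary() at
// 0x10200000 and high() still at that base, expand_by(0x300000) commits
// 0x200000 bytes in the lower region (up to the 4M boundary at
// 0x10400000) and one whole 4M large-page chunk in the middle region,
// since middle commits are rounded up to middle_alignment; upper_needs
// stays 0 because the new high is far below middle_high_boundary().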

// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary,
// in which case that particular page can now be uncommitted.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // Don't uncommit if the entire space is pinned in memory.
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate the new unaligned addresses.
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align each address to its region's alignment.
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink.
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit.
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}

#ifndef PRODUCT
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}

void VirtualSpace::print() {
  tty->print   ("Virtual space:");
  if (special()) tty->print(" (pinned in memory)");
  tty->cr();
  tty->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  tty->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
  tty->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
  tty->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
}

#endif