/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif


// ReservedSpace
ReservedSpace::ReservedSpace(size_t size) {
  initialize(size, 0, false, NULL, 0, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             char* requested_address,
                             const size_t noaccess_prefix) {
  initialize(size+noaccess_prefix, alignment, large, requested_address,
             noaccess_prefix, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             bool executable) {
  initialize(size, alignment, large, NULL, 0, executable);
}

char *
ReservedSpace::align_reserved_region(char* addr, const size_t len,
                                     const size_t prefix_size,
                                     const size_t prefix_align,
                                     const size_t suffix_size,
                                     const size_t suffix_align)
{
  assert(addr != NULL, "sanity");
  const size_t required_size = prefix_size + suffix_size;
  assert(len >= required_size, "len too small");

  const size_t s = size_t(addr);
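  // suffix_align is a power of 2, so (x & (suffix_align - 1)) is x modulo
  // suffix_align.  beg_ofs is how far past a suffix_align boundary the suffix
  // would start; beg_delta is how far addr must slide forward to correct it.
  // For example (hypothetical values), with suffix_align = 0x1000 and
  // s + prefix_size = 0x12345: beg_ofs = 0x345 and beg_delta = 0xcbb, so the
  // aligned region begins 0xcbb bytes into the raw reservation.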
  const size_t beg_ofs = (s + prefix_size) & (suffix_align - 1);
  const size_t beg_delta = beg_ofs == 0 ? 0 : suffix_align - beg_ofs;

  if (len < beg_delta + required_size) {
     return NULL; // Cannot do proper alignment.
  }
  const size_t end_delta = len - (beg_delta + required_size);

  if (beg_delta != 0) {
    os::release_or_uncommit_partial_region(addr, beg_delta);
  }

  if (end_delta != 0) {
    char* release_addr = (char*) (s + beg_delta + required_size);
    os::release_or_uncommit_partial_region(release_addr, end_delta);
  }

  return (char*) (s + beg_delta);
}

void ReservedSpace::set_raw_base_and_size(char * const raw_base,
                                          size_t raw_size) {
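  // _raw_base is only tracked on platforms that cannot release part of a
  // reservation (see reserve_and_align()); elsewhere the excess around an
  // aligned region is given back immediately and there is nothing extra to
  // remember.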
  assert(raw_base == NULL || !os::can_release_partial_region(), "sanity");
  _raw_base = raw_base;
  _raw_size = raw_size;
}

// On some systems (e.g., windows), the address returned by os::reserve_memory()
// is the only addr that can be passed to os::release_memory().  If alignment
// was done by this class, that original address is _raw_base.
void ReservedSpace::release_memory(char* default_addr, size_t default_size) {
  bool ok;
  if (_raw_base == NULL) {
    ok = os::release_memory(default_addr, default_size);
  } else {
    assert(!os::can_release_partial_region(), "sanity");
    ok = os::release_memory(_raw_base, _raw_size);
  }
  if (!ok) {
    fatal("os::release_memory failed");
  }
  set_raw_base_and_size(NULL, 0);
}

char* ReservedSpace::reserve_and_align(const size_t reserve_size,
                                       const size_t prefix_size,
                                       const size_t prefix_align,
                                       const size_t suffix_size,
                                       const size_t suffix_align)
{
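  // Over-reserve so that a properly aligned sub-region is guaranteed to
  // exist, then trim the surplus at both ends (where the OS allows partial
  // release; otherwise remember the raw reservation so it can be released
  // whole later).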
  assert(reserve_size > prefix_size + suffix_size, "should not be here");

  char* raw_addr = os::reserve_memory(reserve_size, NULL, prefix_align);
  if (raw_addr == NULL) return NULL;

  char* result = align_reserved_region(raw_addr, reserve_size, prefix_size,
                                       prefix_align, suffix_size,
                                       suffix_align);
  if (result == NULL && !os::release_memory(raw_addr, reserve_size)) {
    fatal("os::release_memory failed");
  }

  if (!os::can_release_partial_region()) {
    set_raw_base_and_size(raw_addr, reserve_size);
  }

#ifdef ASSERT
  if (result != NULL) {
    const size_t raw = size_t(raw_addr);
    const size_t res = size_t(result);
    assert(res >= raw, "alignment decreased start addr");
    assert(res + prefix_size + suffix_size <= raw + reserve_size,
           "alignment increased end addr");
    assert((res & (prefix_align - 1)) == 0, "bad alignment of prefix");
    assert(((res + prefix_size) & (suffix_align - 1)) == 0,
           "bad alignment of suffix");
  }
#endif

  return result;
}

// Helper method.
bool ReservedSpace::failed_to_reserve_as_requested(char* base,
                                                   char* requested_address,
                                                   const size_t size,
                                                   bool special)
{
  if (base == requested_address || requested_address == NULL)
    return false; // did not fail

  if (base != NULL) {
    // Different reserve address may be acceptable in other cases
    // but for compressed oops heap should be at requested address.
    assert(UseCompressedOops, "currently requested address used only for compressed oops");
    if (PrintCompressedOopsMode) {
      tty->cr();
      tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
    }
    // OS ignored requested address. Try different address.
    if (special) {
      if (!os::release_memory_special(base, size)) {
        fatal("os::release_memory_special failed");
      }
    } else {
      release_memory(base, size);
    }
  }
  return true;
}

ReservedSpace::ReservedSpace(const size_t prefix_size,
                             const size_t prefix_align,
                             const size_t suffix_size,
                             const size_t suffix_align,
                             char* requested_address,
                             const size_t noaccess_prefix)
{
  assert(prefix_size != 0, "sanity");
  assert(prefix_align != 0, "sanity");
  assert(suffix_size != 0, "sanity");
  assert(suffix_align != 0, "sanity");
  assert((prefix_size & (prefix_align - 1)) == 0,
    "prefix_size not divisible by prefix_align");
  assert((suffix_size & (suffix_align - 1)) == 0,
    "suffix_size not divisible by suffix_align");
  assert((suffix_align & (prefix_align - 1)) == 0,
    "suffix_align not divisible by prefix_align");

  // Assert that if noaccess_prefix is used, it is the same as prefix_align.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == prefix_align, "noaccess prefix wrong");

  set_raw_base_and_size(NULL, 0);
  // Add in noaccess_prefix to prefix_size.
  const size_t adjusted_prefix_size = prefix_size + noaccess_prefix;
  const size_t size = adjusted_prefix_size + suffix_size;

  // On systems where the entire region has to be reserved and committed up
  // front, the compound alignment normally done by this method is unnecessary.
  const bool try_reserve_special = UseLargePages &&
    prefix_align == os::large_page_size();
  if (!os::can_commit_large_page_memory() && try_reserve_special) {
    initialize(size, prefix_align, true, requested_address, noaccess_prefix,
               false);
    return;
  }

  _base = NULL;
  _size = 0;
  _alignment = 0;
  _special = false;
  _noaccess_prefix = 0;
  _executable = false;

  // Optimistically try to reserve the exact size needed.
  char* addr;
  if (requested_address != 0) {
    requested_address -= noaccess_prefix; // adjust address
    assert(requested_address != NULL, "huge noaccess prefix?");
    addr = os::attempt_reserve_memory_at(size, requested_address);
    if (failed_to_reserve_as_requested(addr, requested_address, size, false)) {
      // OS ignored requested address. Try different address.
      addr = NULL;
    }
  } else {
    addr = os::reserve_memory(size, NULL, prefix_align);
  }
  if (addr == NULL) return;

  // Check whether the result has the needed alignment (unlikely unless
  // prefix_align < suffix_align).
  const size_t ofs = (size_t(addr) + adjusted_prefix_size) & (suffix_align - 1);
  if (ofs != 0) {
    // Wrong alignment.  Release, allocate more space and do manual alignment.
    //
    // On most operating systems, another allocation with a somewhat larger size
    // will return an address "close to" that of the previous allocation.  The
    // result is often the same address (if the kernel hands out virtual
    // addresses from low to high), or an address that is offset by the increase
    // in size.  Exploit that to minimize the amount of extra space requested.
    release_memory(addr, size);

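    // If the new mapping comes back at the same address, suffix_align - ofs
    // extra bytes let the start slide forward to the next suffix_align
    // boundary; if it comes back shifted down by the size increase, ofs extra
    // bytes restore the alignment the first attempt almost had.  MAX2 covers
    // both outcomes.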
    const size_t extra = MAX2(ofs, suffix_align - ofs);
    addr = reserve_and_align(size + extra, adjusted_prefix_size, prefix_align,
                             suffix_size, suffix_align);
    if (addr == NULL) {
      // Try an even larger region.  If this fails, address space is exhausted.
      addr = reserve_and_align(size + suffix_align, adjusted_prefix_size,
                               prefix_align, suffix_size, suffix_align);
    }

    if (requested_address != 0 &&
        failed_to_reserve_as_requested(addr, requested_address, size, false)) {
      // As a result of the alignment constraints, the allocated addr differs
      // from the requested address. Return back to the caller who can
      // take remedial action (like try again without a requested address).
      assert(_base == NULL, "should be");
      return;
    }
  }

  _base = addr;
  _size = size;
  _alignment = prefix_align;
  _noaccess_prefix = noaccess_prefix;
}

void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               const size_t noaccess_prefix,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  set_raw_base_and_size(NULL, 0);

  alignment = MAX2(alignment, (size_t)os::vm_page_size());

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == alignment, "noaccess prefix wrong");

  _base = NULL;
  _size = 0;
  _special = false;
  _executable = executable;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }

  // If OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  if (requested_address != 0) {
    requested_address -= noaccess_prefix; // adjust requested address
    assert(requested_address != NULL, "huge noaccess prefix?");
  }

  if (special) {

    base = os::reserve_memory_special(size, requested_address, executable);

    if (base != NULL) {
      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
        // OS ignored requested address. Try different address.
        return;
      }
      // Check alignment constraints
      assert((uintptr_t) base % alignment == 0,
             "Large pages returned a non-aligned address");
      _special = true;
    } else {
      // failed; try to reserve regular memory below
      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
        if (PrintCompressedOopsMode) {
          tty->cr();
          tty->print_cr("Reserve regular memory without large pages.");
        }
      }
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid over mapping something
    // important.  If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // OS ignored requested address. Try different address.
        base = NULL;
      }
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints
    if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
      // Base not aligned, retry
      release_memory(base, size);

      // Make sure that size is aligned
      size = align_size_up(size, alignment);
      base = os::reserve_memory_aligned(size, alignment);

      if (requested_address != 0 &&
          failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address. Return back to the caller who can
        // take remedial action (like try again without a requested address).
        assert(_base == NULL, "should be");
        return;
      }
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = noaccess_prefix;

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == _alignment, "noaccess prefix wrong");

  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");
}


ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                             bool special, bool executable) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  _base = base;
  _size = size;
  set_raw_base_and_size(NULL, 0);
  _alignment = alignment;
  _noaccess_prefix = 0;
  _special = special;
  _executable = executable;
}


ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
                                        bool split, bool realloc) {
  assert(partition_size <= size(), "partition failed");
  if (split) {
    os::split_reserved_memory(base(), size(), partition_size, realloc);
  }
  ReservedSpace result(base(), partition_size, alignment, special(),
                       executable());
  return result;
}


ReservedSpace
ReservedSpace::last_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base() + partition_size, size() - partition_size,
                       alignment, special(), executable());
  return result;
}


size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_size_up(size, os::vm_page_size());
}


size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_size_down(size, os::vm_page_size());
}


size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_size_up(size, os::vm_allocation_granularity());
}


size_t ReservedSpace::allocation_align_size_down(size_t size) {
  return align_size_down(size, os::vm_allocation_granularity());
}


void ReservedSpace::release() {
  if (is_reserved()) {
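    // The noaccess prefix (if any) was carved out of the original
    // reservation and protect_noaccess_prefix() moved _base past it, so it
    // must be handed back together with the rest of the region.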
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
    if (special()) {
      os::release_memory_special(real_base, real_size);
    } else {
      release_memory(real_base, real_size);
    }
    _base = NULL;
    _size = 0;
    _noaccess_prefix = 0;
    _special = false;
    _executable = false;
  }
}

void ReservedSpace::protect_noaccess_prefix(const size_t size) {
  assert( (_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
                                      (Universe::narrow_oop_base() != NULL) &&
                                      Universe::narrow_oop_use_implicit_null_checks()),
         "noaccess_prefix should be used only with non zero based compressed oops");

  // If there is no noaccess prefix, return.
  if (_noaccess_prefix == 0) return;

  assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
         "must be at least page size big");

  // Protect memory at the base of the allocated region.
  // If special, the page was committed (only matters on windows)
  if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
                          _special)) {
    fatal("cannot protect protection page");
  }
  if (PrintCompressedOopsMode) {
    tty->cr();
    tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
  }

  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
         "must be exactly of required size and alignment");
}

ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
                                     bool large, char* requested_address) :
  ReservedSpace(size, alignment, large,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), alignment) : 0) {
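  // The noaccess prefix must be page-aligned (so it can be protected with
  // page granularity) and a multiple of the heap alignment (so the heap
  // proper stays aligned); lcm(page size, alignment) is the smallest size
  // that satisfies both.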
  if (base() > 0) {
    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
  }

  // Only reserved space for the java heap should have a noaccess_prefix
  // if using compressed oops.
  protect_noaccess_prefix(size);
}

ReservedHeapSpace::ReservedHeapSpace(const size_t prefix_size,
                                     const size_t prefix_align,
                                     const size_t suffix_size,
                                     const size_t suffix_align,
                                     char* requested_address) :
  ReservedSpace(prefix_size, prefix_align, suffix_size, suffix_align,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), prefix_align) : 0) {
  if (base() > 0) {
    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
  }

  protect_noaccess_prefix(prefix_size+suffix_size);
}

// Reserve space for code segment.  Same as Java heap only we mark this as
// executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     bool large) :
  ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
  MemTracker::record_virtual_memory_type((address)base(), mtCode);
}

// VirtualSpace

VirtualSpace::VirtualSpace() {
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  if (!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
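  //
  // The space is thus split into three regions (a sketch; boundaries can
  // coincide when the space is smaller than a large page):
  //
  //   low_boundary()                                      high_boundary()
  //   |-- lower --|------------ middle ------------|-- upper --|
  //           lower_high_boundary()        middle_high_boundary()
  //
  // lower and upper use the default page size; middle, the large-page-
  // aligned interior, can be committed with large pages.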
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = os::page_size_for_region(rs.size(), rs.size(), 1);
  _upper_alignment  = os::vm_page_size();

  // End of each region
  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // commit to initial size
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}


VirtualSpace::~VirtualSpace() {
  release();
}


void VirtualSpace::release() {
  // This does not release memory it never reserved.
  // Caller must release via rs.release();
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}


size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}


size_t VirtualSpace::uncommitted_size() const {
  return reserved_size() - committed_size();
}


bool VirtualSpace::contains(const void* p) const {
  return low() <= (const char*) p && (const char*) p < high();
}

/*
   First we need to determine if a particular virtual space is using large
   pages.  This is done in the initialize function and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages.  Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks.  If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
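// For example (hypothetical sizes), with 4K default pages and 2M large
// pages, growing high() from just below lower_high_boundary() into the
// middle region commits the remaining 4K pages of the lower region and then
// whole 2M pages of the middle region, since the middle new high is rounded
// up to middle_alignment().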
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) return false;

  if (special()) {
    // don't commit memory if the entire space is pinned in memory
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(),
         "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =
    MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high =
    MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =
    MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on each region's alignment.  Lower and upper
  // alignment will always be the default page size.  Middle alignment will be
  // LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By getting the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra or inter region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs =
      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs =
      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs =
      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions
  if (lower_needs > 0) {
    assert(low_boundary() <= lower_high() &&
           lower_high() + lower_needs <= lower_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", lower_needs=" SIZE_FORMAT ", %d) failed",
                         lower_high(), lower_needs, _executable);)
      return false;
    } else {
      _lower_high += lower_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= middle_high() &&
           middle_high() + middle_needs <= middle_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
                           _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", middle_needs=" SIZE_FORMAT ", " SIZE_FORMAT
                         ", %d) failed", middle_high(), middle_needs,
                         middle_alignment(), _executable);)
      return false;
    }
    _middle_high += middle_needs;
  }
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= upper_high() &&
           upper_high() + upper_needs <= upper_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", upper_needs=" SIZE_FORMAT ", %d) failed",
                         upper_high(), upper_needs, _executable);)
      return false;
    } else {
      _upper_high += upper_needs;
    }
  }

  if (pre_touch || AlwaysPreTouch) {
    int vm_ps = os::vm_page_size();
    for (char* curr = previous_high;
         curr < unaligned_new_high;
         curr += vm_ps) {
      // Note the use of a write here; originally we tried just a read, but
      // since the value read was unused, the optimizer removed the read.
      // If we ever have a concurrent touchahead thread, we'll want to use
      // a read, to avoid the potential of overwriting data (if a mutator
      // thread beats the touchahead thread to a page).  There are various
      // ways of making sure this read is not optimized away: for example,
      // generating the code for a read procedure at runtime.
      *curr = 0;
    }
  }

  _high += bytes;
  return true;
}

// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary,
// at which point that particular page can be uncommitted.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // don't uncommit if the entire space is pinned in memory
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align address to region's alignment
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}

#ifndef PRODUCT
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}

void VirtualSpace::print() {
  tty->print   ("Virtual space:");
  if (special()) tty->print(" (pinned in memory)");
  tty->cr();
  tty->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  tty->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
  tty->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
  tty->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
}

#endif