/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif


// ReservedSpace

// Dummy constructor
ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
    _alignment(0), _special(false), _executable(false) {
}

ReservedSpace::ReservedSpace(size_t size) {
  size_t page_size = os::page_size_for_region(size, size, 1);
  bool large_pages = page_size != (size_t)os::vm_page_size();
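  // Illustrative example (numbers assumed, not from any particular
  // platform): with UseLargePages enabled on a system whose large page
  // size is 2M, a 10M request (a multiple of 2M) can get page_size == 2M,
  // making large_pages true; a 64K request falls back to the default page
  // size, leaving large_pages false.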
  // Don't force the alignment to be large page aligned,
  // since that will waste memory.
  size_t alignment = os::vm_allocation_granularity();
  initialize(size, alignment, large_pages, NULL, 0, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             char* requested_address,
                             const size_t noaccess_prefix) {
  initialize(size+noaccess_prefix, alignment, large, requested_address,
             noaccess_prefix, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             bool executable) {
  initialize(size, alignment, large, NULL, 0, executable);
}

char *
ReservedSpace::align_reserved_region(char* addr, const size_t len,
                                     const size_t prefix_size,
                                     const size_t prefix_align,
                                     const size_t suffix_size,
                                     const size_t suffix_align)
{
  assert(addr != NULL, "sanity");
  const size_t required_size = prefix_size + suffix_size;
  assert(len >= required_size, "len too small");

  const size_t s = size_t(addr);
  const size_t beg_ofs = (s + prefix_size) & (suffix_align - 1);
  const size_t beg_delta = beg_ofs == 0 ? 0 : suffix_align - beg_ofs;

  if (len < beg_delta + required_size) {
    return NULL; // Cannot do proper alignment.
  }
  const size_t end_delta = len - (beg_delta + required_size);
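  // Worked example (addresses assumed for illustration only): with
  // addr = 0x10003000, prefix_size = 0x2000, suffix_align = 0x10000:
  //   beg_ofs   = (0x10003000 + 0x2000) & 0xffff = 0x5000
  //   beg_delta = 0x10000 - 0x5000               = 0xb000
  // so the returned region starts at 0x1000e000 and the prefix ends on a
  // suffix_align boundary (0x10010000).  The reservation is carved up as:
  //
  //   addr                                                  addr + len
  //   |-- beg_delta --|-- prefix --|-- suffix --|-- end_delta --|
  //      (released)      (kept)       (kept)       (released)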

  if (beg_delta != 0) {
    os::release_or_uncommit_partial_region(addr, beg_delta);
  }

  if (end_delta != 0) {
    char* release_addr = (char*) (s + beg_delta + required_size);
    os::release_or_uncommit_partial_region(release_addr, end_delta);
  }

  return (char*) (s + beg_delta);
}

void ReservedSpace::set_raw_base_and_size(char * const raw_base,
                                          size_t raw_size) {
  assert(raw_base == NULL || !os::can_release_partial_region(), "sanity");
  _raw_base = raw_base;
  _raw_size = raw_size;
}

// On some systems (e.g., Windows), the address returned by os::reserve_memory()
// is the only address that can be passed to os::release_memory().  If alignment
// was done by this class, that original address is _raw_base.
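// (On Windows, for instance, VirtualFree(MEM_RELEASE) only accepts the exact
// base address returned by VirtualAlloc, so partial releases are impossible
// and the full raw region must be released in one call.)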
void ReservedSpace::release_memory(char* default_addr, size_t default_size) {
  bool ok;
  if (_raw_base == NULL) {
    ok = os::release_memory(default_addr, default_size);
  } else {
    assert(!os::can_release_partial_region(), "sanity");
    ok = os::release_memory(_raw_base, _raw_size);
  }
  if (!ok) {
    fatal("os::release_memory failed");
  }
  set_raw_base_and_size(NULL, 0);
}

char* ReservedSpace::reserve_and_align(const size_t reserve_size,
                                       const size_t prefix_size,
                                       const size_t prefix_align,
                                       const size_t suffix_size,
                                       const size_t suffix_align)
{
  assert(reserve_size > prefix_size + suffix_size, "should not be here");

  char* raw_addr = os::reserve_memory(reserve_size, NULL, prefix_align);
  if (raw_addr == NULL) return NULL;

  char* result = align_reserved_region(raw_addr, reserve_size, prefix_size,
                                       prefix_align, suffix_size,
                                       suffix_align);
  if (result == NULL && !os::release_memory(raw_addr, reserve_size)) {
    fatal("os::release_memory failed");
  }

  if (!os::can_release_partial_region()) {
    set_raw_base_and_size(raw_addr, reserve_size);
  }

#ifdef ASSERT
  if (result != NULL) {
    const size_t raw = size_t(raw_addr);
    const size_t res = size_t(result);
    assert(res >= raw, "alignment decreased start addr");
    assert(res + prefix_size + suffix_size <= raw + reserve_size,
           "alignment increased end addr");
    assert((res & (prefix_align - 1)) == 0, "bad alignment of prefix");
    assert(((res + prefix_size) & (suffix_align - 1)) == 0,
           "bad alignment of suffix");
  }
#endif

  return result;
}

// Helper method.
bool ReservedSpace::failed_to_reserve_as_requested(char* base,
                                                   char* requested_address,
                                                   const size_t size,
                                                   bool special)
{
  if (base == requested_address || requested_address == NULL)
    return false; // did not fail

  if (base != NULL) {
    // A different reserve address may be acceptable in other cases, but for
    // compressed oops the heap should be at the requested address.
    assert(UseCompressedOops, "currently requested address used only for compressed oops");
    if (PrintCompressedOopsMode) {
      tty->cr();
      tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
    }
    // OS ignored the requested address. Try a different address.
    if (special) {
      if (!os::release_memory_special(base, size)) {
        fatal("os::release_memory_special failed");
      }
    } else {
      release_memory(base, size);
    }
  }
  return true;
}

ReservedSpace::ReservedSpace(const size_t prefix_size,
                             const size_t prefix_align,
                             const size_t suffix_size,
                             const size_t suffix_align,
                             char* requested_address,
                             const size_t noaccess_prefix)
{
  assert(prefix_size != 0, "sanity");
  assert(prefix_align != 0, "sanity");
  assert(suffix_size != 0, "sanity");
  assert(suffix_align != 0, "sanity");
  assert((prefix_size & (prefix_align - 1)) == 0,
    "prefix_size not divisible by prefix_align");
  assert((suffix_size & (suffix_align - 1)) == 0,
    "suffix_size not divisible by suffix_align");
  assert((suffix_align & (prefix_align - 1)) == 0,
    "suffix_align not divisible by prefix_align");

  // Assert that if noaccess_prefix is used, it is the same as prefix_align.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == prefix_align, "noaccess prefix wrong");

  set_raw_base_and_size(NULL, 0);

  // Add noaccess_prefix to prefix_size.
  const size_t adjusted_prefix_size = prefix_size + noaccess_prefix;
  const size_t size = adjusted_prefix_size + suffix_size;

  // On systems where the entire region has to be reserved and committed up
  // front, the compound alignment normally done by this method is unnecessary.
  const bool try_reserve_special = UseLargePages &&
    prefix_align == os::large_page_size();
  if (!os::can_commit_large_page_memory() && try_reserve_special) {
    initialize(size, prefix_align, true, requested_address, noaccess_prefix,
               false);
    return;
  }

  _base = NULL;
  _size = 0;
  _alignment = 0;
  _special = false;
  _noaccess_prefix = 0;
  _executable = false;

  // Optimistically try to reserve the exact size needed.
  char* addr;
  if (requested_address != 0) {
    requested_address -= noaccess_prefix; // adjust address
    assert(requested_address != NULL, "huge noaccess prefix?");
    addr = os::attempt_reserve_memory_at(size, requested_address);
    if (failed_to_reserve_as_requested(addr, requested_address, size, false)) {
      // OS ignored the requested address. Try a different address.
      addr = NULL;
    }
  } else {
    addr = os::reserve_memory(size, NULL, prefix_align);
  }
  if (addr == NULL) return;

  // Check whether the result has the needed alignment (unlikely unless
  // prefix_align < suffix_align).
  const size_t ofs = (size_t(addr) + adjusted_prefix_size) & (suffix_align - 1);
  if (ofs != 0) {
    // Wrong alignment.  Release, allocate more space and do manual alignment.
    //
    // On most operating systems, another allocation with a somewhat larger size
    // will return an address "close to" that of the previous allocation.  The
    // result is often the same address (if the kernel hands out virtual
    // addresses from low to high), or an address that is offset by the increase
    // in size.  Exploit that to minimize the amount of extra space requested.
    release_memory(addr, size);

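    // Worked example (values assumed for illustration): if
    // suffix_align = 0x10000 and the first attempt came back with
    // ofs = 0x5000, then extra = MAX2(0x5000, 0xb000) = 0xb000 bytes of
    // slack are requested.  The MAX of the two candidate deltas is taken so
    // that, whether the retried mapping comes back at the same base or
    // shifted by the size increase, the slack is likely to absorb the
    // remaining misalignment.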
    const size_t extra = MAX2(ofs, suffix_align - ofs);
    addr = reserve_and_align(size + extra, adjusted_prefix_size, prefix_align,
                             suffix_size, suffix_align);
    if (addr == NULL) {
      // Try an even larger region.  If this fails, address space is exhausted.
      addr = reserve_and_align(size + suffix_align, adjusted_prefix_size,
                               prefix_align, suffix_size, suffix_align);
    }

    if (requested_address != 0 &&
        failed_to_reserve_as_requested(addr, requested_address, size, false)) {
      // As a result of the alignment constraints, the allocated addr differs
      // from the requested address. Return to the caller, who can
      // take remedial action (like trying again without a requested address).
      assert(_base == NULL, "should be");
      return;
    }
  }

  _base = addr;
  _size = size;
  _alignment = prefix_align;
  _noaccess_prefix = noaccess_prefix;
}

void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               const size_t noaccess_prefix,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  set_raw_base_and_size(NULL, 0);

  alignment = MAX2(alignment, (size_t)os::vm_page_size());

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == alignment, "noaccess prefix wrong");

  _base = NULL;
  _size = 0;
  _special = false;
  _executable = executable;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }

  // If the OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  if (requested_address != 0) {
    requested_address -= noaccess_prefix; // adjust requested address
    assert(requested_address != NULL, "huge noaccess prefix?");
  }
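  // Example (addresses assumed for illustration): if the caller wants the
  // usable heap at 0x800000000 with a noaccess_prefix of 0x1000000, the
  // reservation is attempted at 0x7ff000000 so that, after the prefix is
  // protected and skipped, the heap itself still begins at the address the
  // caller asked for.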

  if (special) {

    base = os::reserve_memory_special(size, alignment, requested_address, executable);

    if (base != NULL) {
      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
        // OS ignored the requested address. Try a different address.
        return;
      }
      // Check alignment constraints.
      assert((uintptr_t) base % alignment == 0,
             err_msg("Large pages returned a non-aligned address, base: "
                 PTR_FORMAT " alignment: " PTR_FORMAT,
                 base, (void*)(uintptr_t)alignment));
      _special = true;
    } else {
      // Failed; try to reserve regular memory below.
      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
        if (PrintCompressedOopsMode) {
          tty->cr();
          tty->print_cr("Reserve regular memory without large pages.");
        }
      }
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid mapping over something
    // important.  If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // OS ignored the requested address. Try a different address.
        base = NULL;
      }
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints.
    if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
      // Base not aligned; retry.
      release_memory(base, size);

      // Make sure that the size is aligned.
      size = align_size_up(size, alignment);
      base = os::reserve_memory_aligned(size, alignment);
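      // (reserve_memory_aligned is expected to over-reserve by up to
      // alignment bytes and trim the unaligned head and tail, or, on
      // systems without partial release, to release and retry at the
      // computed aligned address; either way the result is aligned or NULL.)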

      if (requested_address != 0 &&
          failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address. Return to the caller, who can
        // take remedial action (like trying again without a requested address).
        assert(_base == NULL, "should be");
        return;
      }
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = noaccess_prefix;

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == _alignment, "noaccess prefix wrong");

  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");
}


ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                             bool special, bool executable) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  _base = base;
  _size = size;
  set_raw_base_and_size(NULL, 0);
  _alignment = alignment;
  _noaccess_prefix = 0;
  _special = special;
  _executable = executable;
}


ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
                                        bool split, bool realloc) {
  assert(partition_size <= size(), "partition failed");
  if (split) {
    os::split_reserved_memory(base(), size(), partition_size, realloc);
  }
  ReservedSpace result(base(), partition_size, alignment, special(),
                       executable());
  return result;
}


ReservedSpace
ReservedSpace::last_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base() + partition_size, size() - partition_size,
                       alignment, special(), executable());
  return result;
}


size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_size_up(size, os::vm_page_size());
}


size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_size_down(size, os::vm_page_size());
}


size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_size_up(size, os::vm_allocation_granularity());
}


size_t ReservedSpace::allocation_align_size_down(size_t size) {
  return align_size_down(size, os::vm_allocation_granularity());
}


void ReservedSpace::release() {
  if (is_reserved()) {
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
    if (special()) {
      os::release_memory_special(real_base, real_size);
    } else {
      release_memory(real_base, real_size);
    }
    _base = NULL;
    _size = 0;
    _noaccess_prefix = 0;
    _special = false;
    _executable = false;
  }
}

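// With non-zero-based compressed oops, a compressed null (0) decodes to the
// narrow-oop base address.  Protecting a noaccess prefix page at that base
// turns a load through a null oop into a SIGSEGV that the VM converts into
// a NullPointerException, instead of requiring an explicit null check on
// every access; this is the mechanism that
// Universe::narrow_oop_use_implicit_null_checks() guards.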
void ReservedSpace::protect_noaccess_prefix(const size_t size) {
  assert((_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
                                     (Universe::narrow_oop_base() != NULL) &&
                                     Universe::narrow_oop_use_implicit_null_checks()),
         "noaccess_prefix should be used only with non-zero-based compressed oops");

  // If there is no noaccess prefix, return.
  if (_noaccess_prefix == 0) return;

  assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
         "must be at least page size big");

  // Protect memory at the base of the allocated region.
  // If special, the page was committed (only matters on Windows).
  if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
                          _special)) {
    fatal("cannot protect protection page");
  }
  if (PrintCompressedOopsMode) {
    tty->cr();
    tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " SIZE_FORMAT " bytes", _base, _noaccess_prefix);
  }

  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
         "must be exactly of required size and alignment");
}

ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
                                     bool large, char* requested_address) :
  ReservedSpace(size, alignment, large,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), alignment) : 0) {
  if (base() != NULL) {
    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
  }

  // Only reserved space for the java heap should have a noaccess_prefix
  // if using compressed oops.
  protect_noaccess_prefix(size);
}

ReservedHeapSpace::ReservedHeapSpace(const size_t prefix_size,
                                     const size_t prefix_align,
                                     const size_t suffix_size,
                                     const size_t suffix_align,
                                     char* requested_address) :
  ReservedSpace(prefix_size, prefix_align, suffix_size, suffix_align,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), prefix_align) : 0) {
  if (base() != NULL) {
    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
  }

  protect_noaccess_prefix(prefix_size+suffix_size);
}

// Reserve space for code segment.  Same as Java heap, except we mark this as
// executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     bool large) :
  ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
  MemTracker::record_virtual_memory_type((address)base(), mtCode);
}

// VirtualSpace

VirtualSpace::VirtualSpace() {
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  if (!rs.is_reserved()) return false;  // Allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are the codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
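  //
  // The space is managed as three contiguous regions:
  //
  //   low_boundary                                          high_boundary
  //   |--- lower ---|---------- middle ----------|--- upper ---|
  //   (small pages)  (large pages, if available)  (small pages)
  //
  // The lower and upper regions absorb the unaligned head and tail of the
  // reservation; only the middle region, whose ends are rounded to the
  // large page size, can be committed with large pages.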
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = os::page_size_for_region(rs.size(), rs.size(), 1);
  _upper_alignment  = os::vm_page_size();

  // End of each region.
  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region.
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // Commit to initial size.
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}


VirtualSpace::~VirtualSpace() {
  release();
}


void VirtualSpace::release() {
  // This does not release memory it never reserved.
  // Caller must release via rs.release();
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}


size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}


size_t VirtualSpace::uncommitted_size() const {
  return reserved_size() - committed_size();
}


bool VirtualSpace::contains(const void* p) const {
  return low() <= (const char*) p && (const char*) p < high();
}

/*
   First we need to determine whether a particular virtual space is using
   large pages.  This is done in the initialize function, and only virtual
   spaces that are larger than LargePageSizeInBytes use large pages.  Once
   we have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks.  If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading up to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
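// Worked example (addresses assumed for illustration): with a 2M large page
// size, a space reserved at [0x10100000, 0x10900000) has
//   lower  region: [0x10100000, 0x10200000)  (small pages)
//   middle region: [0x10200000, 0x10800000)  (large pages)
//   upper  region: [0x10800000, 0x10900000)  (small pages)
// An expand_by that moves high() from 0x10180000 to 0x10300000 commits the
// rest of the lower region with small pages plus one full 2M chunk of the
// middle region, even though high() itself ends up mid-chunk.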
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) return false;

  if (special()) {
    // Don't commit memory if the entire space is pinned in memory.
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(),
         "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =
    MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high =
    MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =
    MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on each region's alignment.  Lower and upper
  // alignment will always be the default page size.  Middle alignment will be
  // LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region, so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By taking the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell whether
  // it is an intra- or inter-region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs =
      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs =
      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs =
      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions.
  if (lower_needs > 0) {
    assert(low_boundary() <= lower_high() &&
           lower_high() + lower_needs <= lower_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", lower_needs=" SIZE_FORMAT ", %d) failed",
                         lower_high(), lower_needs, _executable);)
      return false;
    } else {
      _lower_high += lower_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= middle_high() &&
           middle_high() + middle_needs <= middle_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
                           _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", middle_needs=" SIZE_FORMAT ", " SIZE_FORMAT
                         ", %d) failed", middle_high(), middle_needs,
                         middle_alignment(), _executable);)
      return false;
    }
    _middle_high += middle_needs;
  }
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= upper_high() &&
           upper_high() + upper_needs <= upper_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", upper_needs=" SIZE_FORMAT ", %d) failed",
                         upper_high(), upper_needs, _executable);)
      return false;
    } else {
      _upper_high += upper_needs;
    }
  }

  if (pre_touch || AlwaysPreTouch) {
    int vm_ps = os::vm_page_size();
    for (char* curr = previous_high;
         curr < unaligned_new_high;
         curr += vm_ps) {
      // Note the use of a write here; originally we tried just a read, but
      // since the value read was unused, the optimizer removed the read.
      // If we ever have a concurrent touchahead thread, we'll want to use
      // a read, to avoid the potential of overwriting data (if a mutator
      // thread beats the touchahead thread to a page).  There are various
      // ways of making sure this read is not optimized away: for example,
      // generating the code for a read procedure at runtime.
      *curr = 0;
    }
  }

  _high += bytes;
  return true;
}

// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary,
// in which case that particular page can now be uncommitted.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // Don't uncommit if the entire space is pinned in memory.
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate the new unaligned address for each region.
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align each address to its region's alignment.
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink.
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit.
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}

#ifndef PRODUCT
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}

void VirtualSpace::print() {
  tty->print   ("Virtual space:");
  if (special()) tty->print(" (pinned in memory)");
  tty->cr();
  tty->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  tty->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
  tty->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
  tty->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
}


/////////////// Unit tests ///////////////

#ifndef PRODUCT

#define test_log(...) \
  do {\
    if (VerboseInternalVMTests) { \
      tty->print_cr(__VA_ARGS__); \
      tty->flush(); \
    }\
  } while (false)
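// (test_log is wrapped in do { ... } while (false) so the macro expands to a
// single statement and composes safely with if/else without extra braces.)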

class TestReservedSpace : AllStatic {
 public:
  static void small_page_write(void* addr, size_t size) {
    size_t page_size = os::vm_page_size();

    char* end = (char*)addr + size;
    for (char* p = (char*)addr; p < end; p += page_size) {
      *p = 1;
    }
  }

  static void release_memory_for_test(ReservedSpace rs) {
    if (rs.special()) {
      guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
    } else {
      guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
    }
  }

  static void test_reserved_space1(size_t size, size_t alignment) {
    test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);

    assert(is_size_aligned(size, alignment), "Incorrect input parameters");

    ReservedSpace rs(size,          // size
                     alignment,     // alignment
                     UseLargePages, // large
                     NULL,          // requested_address
                     0);            // noaccess_prefix

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
    assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }

  static void test_reserved_space2(size_t size) {
    test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);

    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");

    ReservedSpace rs(size);

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }

  static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
    test_log("test_reserved_space3(%p, %p, %d)",
        (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);

    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
    assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");

    bool large = maybe_large && UseLargePages && size >= os::large_page_size();

    ReservedSpace rs(size, alignment, large, false);

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }


  static void test_reserved_space1() {
    size_t size = 2 * 1024 * 1024;
    size_t ag   = os::vm_allocation_granularity();

    test_reserved_space1(size,      ag);
    test_reserved_space1(size * 2,  ag);
    test_reserved_space1(size * 10, ag);
  }

  static void test_reserved_space2() {
    size_t size = 2 * 1024 * 1024;
    size_t ag = os::vm_allocation_granularity();

    test_reserved_space2(size * 1);
    test_reserved_space2(size * 2);
    test_reserved_space2(size * 10);
    test_reserved_space2(ag);
    test_reserved_space2(size - ag);
    test_reserved_space2(size);
    test_reserved_space2(size + ag);
    test_reserved_space2(size * 2);
    test_reserved_space2(size * 2 - ag);
    test_reserved_space2(size * 2 + ag);
    test_reserved_space2(size * 3);
    test_reserved_space2(size * 3 - ag);
    test_reserved_space2(size * 3 + ag);
    test_reserved_space2(size * 10);
    test_reserved_space2(size * 10 + size / 2);
  }

  static void test_reserved_space3() {
    size_t ag = os::vm_allocation_granularity();

    test_reserved_space3(ag,      ag    , false);
    test_reserved_space3(ag * 2,  ag    , false);
    test_reserved_space3(ag * 3,  ag    , false);
    test_reserved_space3(ag * 2,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 2, false);
    test_reserved_space3(ag * 8,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 4, false);
    test_reserved_space3(ag * 8,  ag * 4, false);
    test_reserved_space3(ag * 16, ag * 4, false);

    if (UseLargePages) {
      size_t lp = os::large_page_size();

      // Without large pages
      test_reserved_space3(lp,     ag * 4, false);
      test_reserved_space3(lp * 2, ag * 4, false);
      test_reserved_space3(lp * 4, ag * 4, false);
      test_reserved_space3(lp,     lp    , false);
      test_reserved_space3(lp * 2, lp    , false);
      test_reserved_space3(lp * 3, lp    , false);
      test_reserved_space3(lp * 2, lp * 2, false);
      test_reserved_space3(lp * 4, lp * 2, false);
      test_reserved_space3(lp * 8, lp * 2, false);

      // With large pages
      test_reserved_space3(lp, ag * 4    , true);
      test_reserved_space3(lp * 2, ag * 4, true);
      test_reserved_space3(lp * 4, ag * 4, true);
      test_reserved_space3(lp, lp        , true);
      test_reserved_space3(lp * 2, lp    , true);
      test_reserved_space3(lp * 3, lp    , true);
      test_reserved_space3(lp * 2, lp * 2, true);
      test_reserved_space3(lp * 4, lp * 2, true);
      test_reserved_space3(lp * 8, lp * 2, true);
    }
  }

  static void test_reserved_space() {
    test_reserved_space1();
    test_reserved_space2();
    test_reserved_space3();
  }
};

void TestReservedSpace_test() {
  TestReservedSpace::test_reserved_space();
}

#endif // PRODUCT

#endif