/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/virtualspace.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif


// ReservedSpace
ReservedSpace::ReservedSpace(size_t size) {
  initialize(size, 0, false, NULL, 0, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             char* requested_address,
                             const size_t noaccess_prefix) {
  initialize(size+noaccess_prefix, alignment, large, requested_address,
             noaccess_prefix, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             bool executable) {
  initialize(size, alignment, large, NULL, 0, executable);
}

char *
ReservedSpace::align_reserved_region(char* addr, const size_t len,
                                     const size_t prefix_size,
                                     const size_t prefix_align,
                                     const size_t suffix_size,
                                     const size_t suffix_align)
{
  assert(addr != NULL, "sanity");
  const size_t required_size = prefix_size + suffix_size;
  assert(len >= required_size, "len too small");

  const size_t s = size_t(addr);
  const size_t beg_ofs = (s + prefix_size) & (suffix_align - 1);
  const size_t beg_delta = beg_ofs == 0 ? 0 : suffix_align - beg_ofs;
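  // For example, with suffix_align == 0x1000, addr == (char*) 0x20400 and
  // prefix_size == 0x200: beg_ofs == 0x600 and beg_delta == 0xa00, so the
  // region returned below starts at 0x20e00 and its prefix ends at the
  // suffix-aligned address 0x21000.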

  if (len < beg_delta + required_size) {
    return NULL; // Cannot do proper alignment.
  }
  const size_t end_delta = len - (beg_delta + required_size);

  if (beg_delta != 0) {
    os::release_memory(addr, beg_delta);
  }

  if (end_delta != 0) {
    char* release_addr = (char*) (s + beg_delta + required_size);
    os::release_memory(release_addr, end_delta);
  }

  return (char*) (s + beg_delta);
}

char* ReservedSpace::reserve_and_align(const size_t reserve_size,
                                       const size_t prefix_size,
                                       const size_t prefix_align,
                                       const size_t suffix_size,
                                       const size_t suffix_align)
{
  assert(reserve_size > prefix_size + suffix_size, "should not be here");

  char* raw_addr = os::reserve_memory(reserve_size, NULL, prefix_align);
  if (raw_addr == NULL) return NULL;

  char* result = align_reserved_region(raw_addr, reserve_size, prefix_size,
                                       prefix_align, suffix_size,
                                       suffix_align);
  if (result == NULL && !os::release_memory(raw_addr, reserve_size)) {
    fatal("os::release_memory failed");
  }

#ifdef ASSERT
  if (result != NULL) {
    const size_t raw = size_t(raw_addr);
    const size_t res = size_t(result);
    assert(res >= raw, "alignment decreased start addr");
    assert(res + prefix_size + suffix_size <= raw + reserve_size,
           "alignment increased end addr");
    assert((res & (prefix_align - 1)) == 0, "bad alignment of prefix");
    assert(((res + prefix_size) & (suffix_align - 1)) == 0,
           "bad alignment of suffix");
  }
#endif

  return result;
}

// Helper method.
static bool failed_to_reserve_as_requested(char* base, char* requested_address,
                                           const size_t size, bool special)
{
  if (base == requested_address || requested_address == NULL)
    return false; // did not fail

  if (base != NULL) {
    // A different reserve address may be acceptable in other cases,
    // but the compressed oops heap should be at the requested address.
    assert(UseCompressedOops, "currently requested address used only for compressed oops");
    if (PrintCompressedOopsMode) {
      tty->cr();
      tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
    }
    // OS ignored requested address. Try different address.
    if (special) {
      if (!os::release_memory_special(base, size)) {
        fatal("os::release_memory_special failed");
      }
    } else {
      if (!os::release_memory(base, size)) {
        fatal("os::release_memory failed");
      }
    }
  }
  return true;
}

ReservedSpace::ReservedSpace(const size_t prefix_size,
                             const size_t prefix_align,
                             const size_t suffix_size,
                             const size_t suffix_align,
                             char* requested_address,
                             const size_t noaccess_prefix)
{
  assert(prefix_size != 0, "sanity");
  assert(prefix_align != 0, "sanity");
  assert(suffix_size != 0, "sanity");
  assert(suffix_align != 0, "sanity");
  assert((prefix_size & (prefix_align - 1)) == 0,
    "prefix_size not divisible by prefix_align");
  assert((suffix_size & (suffix_align - 1)) == 0,
    "suffix_size not divisible by suffix_align");
  assert((suffix_align & (prefix_align - 1)) == 0,
    "suffix_align not divisible by prefix_align");

  // Assert that if noaccess_prefix is used, it is the same as prefix_align.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == prefix_align, "noaccess prefix wrong");
  // Add noaccess_prefix to prefix_size.
  const size_t adjusted_prefix_size = prefix_size + noaccess_prefix;
  const size_t size = adjusted_prefix_size + suffix_size;

  // On systems where the entire region has to be reserved and committed up
  // front, the compound alignment normally done by this method is unnecessary.
  const bool try_reserve_special = UseLargePages &&
    prefix_align == os::large_page_size();
  if (!os::can_commit_large_page_memory() && try_reserve_special) {
    initialize(size, prefix_align, true, requested_address, noaccess_prefix,
               false);
    return;
  }

  _base = NULL;
  _size = 0;
  _alignment = 0;
  _special = false;
  _noaccess_prefix = 0;
  _executable = false;

  // Optimistically try to reserve the exact size needed.
  char* addr;
  if (requested_address != 0) {
    requested_address -= noaccess_prefix; // adjust address
    assert(requested_address != NULL, "huge noaccess prefix?");
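    // The caller requested the address where the heap proper should start;
    // the raw reservation begins one noaccess_prefix below that, since the
    // protected page is carved off the bottom of the region.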
    addr = os::attempt_reserve_memory_at(size, requested_address);
    if (failed_to_reserve_as_requested(addr, requested_address, size, false)) {
      // OS ignored requested address. Try different address.
      addr = NULL;
    }
  } else {
    addr = os::reserve_memory(size, NULL, prefix_align);
  }
  if (addr == NULL) return;

  // Check whether the result has the needed alignment (unlikely unless
  // prefix_align == suffix_align).
  const size_t ofs = (size_t(addr) + adjusted_prefix_size) & (suffix_align - 1);
  if (ofs != 0) {
    // Wrong alignment.  Release, allocate more space and do manual alignment.
    //
    // On most operating systems, another allocation with a somewhat larger size
    // will return an address "close to" that of the previous allocation.  The
    // result is often the same address (if the kernel hands out virtual
    // addresses from low to high), or an address that is offset by the increase
    // in size.  Exploit that to minimize the amount of extra space requested.
    if (!os::release_memory(addr, size)) {
      fatal("os::release_memory failed");
    }

    const size_t extra = MAX2(ofs, suffix_align - ofs);
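    // Either ofs or suffix_align - ofs bytes of slack will realign the new
    // reservation, depending on which way its address is offset from the
    // old one; taking the MAX covers both of the common outcomes above.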
    addr = reserve_and_align(size + extra, adjusted_prefix_size, prefix_align,
                             suffix_size, suffix_align);
    if (addr == NULL) {
      // Try an even larger region.  If this fails, address space is exhausted.
      addr = reserve_and_align(size + suffix_align, adjusted_prefix_size,
                               prefix_align, suffix_size, suffix_align);
    }

    if (requested_address != 0 &&
        failed_to_reserve_as_requested(addr, requested_address, size, false)) {
      // As a result of the alignment constraints, the allocated addr differs
      // from the requested address. Return to the caller who can
      // take remedial action (like try again without a requested address).
      assert(_base == NULL, "should be");
      return;
    }
  }

  _base = addr;
  _size = size;
  _alignment = prefix_align;
  _noaccess_prefix = noaccess_prefix;
}

void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               const size_t noaccess_prefix,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  alignment = MAX2(alignment, (size_t)os::vm_page_size());
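  // Reservations are therefore at least page aligned, even when the caller
  // passes an alignment of zero.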

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == alignment, "noaccess prefix wrong");

  _base = NULL;
  _size = 0;
  _special = false;
  _executable = executable;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }

  // If OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  if (requested_address != 0) {
    requested_address -= noaccess_prefix; // adjust requested address
    assert(requested_address != NULL, "huge noaccess prefix?");
  }

  if (special) {

    base = os::reserve_memory_special(size, requested_address, executable);

    if (base != NULL) {
      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
        // OS ignored requested address. Try different address.
        return;
      }
      // Check alignment constraints
      assert((uintptr_t) base % alignment == 0,
             "Large pages returned a non-aligned address");
      _special = true;
    } else {
      // failed; try to reserve regular memory below
      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
        if (PrintCompressedOopsMode) {
          tty->cr();
          tty->print_cr("Reserve regular memory without large pages.");
        }
      }
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid over mapping something
    // important.  If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // OS ignored requested address. Try different address.
        base = NULL;
      }
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints
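    // (base + noaccess_prefix is the eventual heap base, so that is the
    // address which must satisfy the alignment)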
    if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
      // Base not aligned, retry
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      // Reserve size large enough to do manual alignment and
      // increase size to a multiple of the desired alignment
      size = align_size_up(size, alignment);
      size_t extra_size = size + alignment;
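      // A reservation of size + alignment bytes always contains an aligned
      // subrange of size bytes, so the manual alignment below cannot fail
      // for lack of room.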
      do {
        char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
        if (extra_base == NULL) return;
        // Do manual alignment
        base = (char*) align_size_up((uintptr_t) extra_base, alignment);
        assert(base >= extra_base, "just checking");
        // Re-reserve the region at the aligned base address.
        os::release_memory(extra_base, extra_size);
        base = os::reserve_memory(size, base);
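        // Another thread may map the range between the release and the
        // re-reservation above; in that case reserve_memory() returns NULL
        // and the loop retries with a fresh over-sized block.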
      } while (base == NULL);

      if (requested_address != 0 &&
          failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address. Return to the caller who can
        // take remedial action (like try again without a requested address).
        assert(_base == NULL, "should be");
        return;
      }
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = noaccess_prefix;

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == _alignment, "noaccess prefix wrong");

  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");
}


ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                             bool special, bool executable) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = 0;
  _special = special;
  _executable = executable;
}


ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
                                        bool split, bool realloc) {
  assert(partition_size <= size(), "partition failed");
  if (split) {
    os::split_reserved_memory(base(), size(), partition_size, realloc);
  }
  ReservedSpace result(base(), partition_size, alignment, special(),
                       executable());
  return result;
}


ReservedSpace
ReservedSpace::last_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base() + partition_size, size() - partition_size,
                       alignment, special(), executable());
  return result;
}


size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_size_up(size, os::vm_page_size());
}


size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_size_down(size, os::vm_page_size());
}


size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_size_up(size, os::vm_allocation_granularity());
}


size_t ReservedSpace::allocation_align_size_down(size_t size) {
  return align_size_down(size, os::vm_allocation_granularity());
}


void ReservedSpace::release() {
  if (is_reserved()) {
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
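    // The noaccess prefix was carved out of the same reservation, so it is
    // handed back to the OS together with the visible part of the space.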
    if (special()) {
      os::release_memory_special(real_base, real_size);
    } else {
      os::release_memory(real_base, real_size);
    }
    _base = NULL;
    _size = 0;
    _noaccess_prefix = 0;
    _special = false;
    _executable = false;
  }
}

void ReservedSpace::protect_noaccess_prefix(const size_t size) {
  assert((_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
                                     (size_t(_base + _size) > OopEncodingHeapMax) &&
                                     Universe::narrow_oop_use_implicit_null_checks()),
         "noaccess_prefix should be used only with non zero based compressed oops");

  // If there is no noaccess prefix, return.
  if (_noaccess_prefix == 0) return;

  assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
         "must be at least page size big");

  // Protect memory at the base of the allocated region.
  // If special, the page was committed (only matters on Windows)
  if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
                          _special)) {
    fatal("cannot protect protection page");
  }
  if (PrintCompressedOopsMode) {
    tty->cr();
    tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " SIZE_FORMAT " bytes", _base, _noaccess_prefix);
  }

  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
         "must be exactly of required size and alignment");
}

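// The noaccess prefix is a protected page placed below the heap base when
// compressed oops use a non-NULL base.  Decoding a null narrow oop then
// yields an address inside that protected page, so a dereference traps and
// can be handled as an implicit null check rather than an explicit test.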
ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
                                     bool large, char* requested_address) :
  ReservedSpace(size, alignment, large,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), alignment) : 0) {
  // Only reserved space for the java heap should have a noaccess_prefix
  // if using compressed oops.
  protect_noaccess_prefix(size);
}

ReservedHeapSpace::ReservedHeapSpace(const size_t prefix_size,
                                     const size_t prefix_align,
                                     const size_t suffix_size,
                                     const size_t suffix_align,
                                     char* requested_address) :
  ReservedSpace(prefix_size, prefix_align, suffix_size, suffix_align,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), prefix_align) : 0) {
  protect_noaccess_prefix(prefix_size+suffix_size);
}

// Reserve space for code segment.  Same as Java heap only we mark this as
// executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     bool large) :
  ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
}

// VirtualSpace

VirtualSpace::VirtualSpace() {
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  if (!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = os::page_size_for_region(rs.size(), rs.size(), 1);
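  // page_size_for_region() returns the largest supported page size that
  // still fits the requested minimum number of pages (here one) into the
  // region; small regions therefore fall back to the default page size.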
  _upper_alignment  = os::vm_page_size();

  // End of each region
  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // commit to initial size
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}


VirtualSpace::~VirtualSpace() {
  release();
}


void VirtualSpace::release() {
  // This does not release memory it never reserved.
  // Caller must release via rs.release();
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}


size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}


size_t VirtualSpace::uncommitted_size() const {
  return reserved_size() - committed_size();
}


bool VirtualSpace::contains(const void* p) const {
  return low() <= (const char*) p && (const char*) p < high();
}

/*
   First we need to determine if a particular virtual space is using large
   pages.  This is done in the initialize function and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages.  Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks.  If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) return false;

  if (special()) {
    // don't commit memory if the entire space is pinned in memory
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(),
         "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =
    MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high =
    MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =
    MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on each region's alignment.  lower and upper
  // alignment will always be default page size.  middle alignment will be
  // LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By getting the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra or inter region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs =
      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs =
      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs =
      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions
  if (lower_needs > 0) {
    assert(low_boundary() <= lower_high() &&
           lower_high() + lower_needs <= lower_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    } else {
      _lower_high += lower_needs;
    }
  }
 723   }
 724   if (middle_needs > 0) {
 725     assert(lower_high_boundary() <= middle_high() &&
 726            middle_high() + middle_needs <= middle_high_boundary(),
 727            "must not expand beyond region");
 728     if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
 729                            _executable)) {
 730       debug_only(warning("os::commit_memory failed"));
 731       return false;
 732     }
 733     _middle_high += middle_needs;
 734   }
 735   if (upper_needs > 0) {
 736     assert(middle_high_boundary() <= upper_high() &&
 737            upper_high() + upper_needs <= upper_high_boundary(),
 738            "must not expand beyond region");
 739     if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
 740       debug_only(warning("os::commit_memory failed"));
 741       return false;
 742     } else {
 743       _upper_high += upper_needs;
 744     }
 745   }
 746 
 747   if (pre_touch || AlwaysPreTouch) {
 748     int vm_ps = os::vm_page_size();
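    // Touch one word per page so the OS backs the committed range with
    // physical memory now rather than on first use.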
    for (char* curr = previous_high;
         curr < unaligned_new_high;
         curr += vm_ps) {
      // Note the use of a write here; originally we tried just a read, but
      // since the value read was unused, the optimizer removed the read.
      // If we ever have a concurrent touchahead thread, we'll want to use
      // a read, to avoid the potential of overwriting data (if a mutator
      // thread beats the touchahead thread to a page).  There are various
      // ways of making sure this read is not optimized away: for example,
      // generating the code for a read procedure at runtime.
      *curr = 0;
    }
  }

  _high += bytes;
  return true;
}

// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary
// in which case that particular page can now be uncommitted.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // don't uncommit if the entire space is pinned in memory
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align address to region's alignment
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}

#ifndef PRODUCT
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}

void VirtualSpace::print() {
  tty->print("Virtual space:");
  if (special()) tty->print(" (pinned in memory)");
  tty->cr();
 887   tty->print_cr(" - committed: %ld", committed_size());
 888   tty->print_cr(" - reserved:  %ld", reserved_size());
 889   tty->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
 890   tty->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
 891 }
 892 
 893 #endif