
src/hotspot/share/memory/virtualspace.cpp

  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "logging/log.hpp"
  27 #include "memory/resourceArea.hpp"
  28 #include "memory/virtualspace.hpp"
  29 #include "oops/markOop.hpp"
  30 #include "oops/oop.inline.hpp"
  31 #include "runtime/os.inline.hpp"
  32 #include "services/memTracker.hpp"
  33 #include "utilities/align.hpp"
  34 
  35 // ReservedSpace
  36 
  37 // Dummy constructor
  38 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0), 
  39     _nvdimm_base_nv(NULL), _nvdimm_size(0), _dram_size(0),
  40     _alignment(0), _special(false), _executable(false), _fd_for_heap(-1) {
  41 }
  42 
  43 ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1), 
  44     _nvdimm_base_nv(NULL), _nvdimm_size(0), _dram_size(0) {
  45   bool has_preferred_page_size = preferred_page_size != 0;
  46   // Want to use large pages where possible and pad with small pages.
  47   size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
  48   bool large_pages = page_size != (size_t)os::vm_page_size();
  49   size_t alignment;
  50   if (large_pages && has_preferred_page_size) {
  51     alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
  52     // ReservedSpace initialization requires size to be aligned to the given
  53     // alignment. Align the size up.
  54     size = align_up(size, alignment);
  55   } else {
  56     // Don't force the alignment to be large page aligned,
  57     // since that will waste memory.
  58     alignment = os::vm_allocation_granularity();
  59   }
  60   initialize(size, alignment, large_pages, NULL, false);
  61 }
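A small worked example of the page-size handling above; the page size, allocation granularity and requested size are illustrative assumptions, not values taken from this change:

//   preferred_page_size = 2M, vm_page_size() = 4K, vm_allocation_granularity() = 64K
//     -> large_pages = true, alignment = MAX2(2M, 64K) = 2M,
//        and a requested size of 33M is aligned up to 34M before initialize() runs.
//   preferred_page_size = 0
//     -> the page size comes from os::page_size_for_region_unaligned(size, 1) and the
//        alignment stays at the allocation granularity, so the size is not padded here.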
  62 
  63 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  64                              bool large,
  65                              char* requested_address) : _fd_for_heap(-1), 
  66                              _nvdimm_base_nv(NULL), _nvdimm_size(0), _dram_size(0) {
  67   initialize(size, alignment, large, requested_address, false);
  68 }
  69 
  70 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  71                              bool large,
  72                              bool executable) : _fd_for_heap(-1), 
  73                              _nvdimm_base_nv(NULL), _nvdimm_size(0), _dram_size(0) {
  74   initialize(size, alignment, large, NULL, executable);
  75 }
  76 
  77 // Helper method
  78 static void unmap_or_release_memory(char* base, size_t size, bool is_file_mapped) {
  79   if (is_file_mapped) {
  80     if (!os::unmap_memory(base, size)) {
  81       fatal("os::unmap_memory failed");
  82     }
  83   } else if (!os::release_memory(base, size)) {
  84     fatal("os::release_memory failed");
  85   }
  86 }
  87 
  88 // Helper method.
  89 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
  90                                            const size_t size, bool special, bool is_file_mapped = false)
  91 {
  92   if (base == requested_address || requested_address == NULL)
  93     return false; // did not fail


 130   _noaccess_prefix = 0;
 131   if (size == 0) {
 132     return;
 133   }
 134 
 135   // If OS doesn't support demand paging for large page memory, we need
 136   // to use reserve_memory_special() to reserve and pin the entire region.
 137   // If there is a backing file directory for this space then whether
 138   // large pages are allocated is up to the filesystem of the backing file.
 139   // So we ignore the UseLargePages flag in this case.
 140   bool special = large && !os::can_commit_large_page_memory();
 141   if (special && _fd_for_heap != -1) {
 142     special = false;
 143     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 144       !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 145       log_debug(gc, heap)("Ignoring UseLargePages since large page support is up to the file system of the backing file for Java heap");
 146     }
 147   }
 148 
 149   char* base = NULL;
 150   char* nvdimm_base = NULL;
 151 
 152   if (special) {
 153 
 154     base = os::reserve_memory_special(size, alignment, requested_address, executable);
 155 
 156     if (base != NULL) {
 157       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
 158         // OS ignored requested address. Try different address.
 159         return;
 160       }
 161       // Check alignment constraints.
 162       assert((uintptr_t) base % alignment == 0,
 163              "Large pages returned a non-aligned address, base: "
 164              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 165              p2i(base), alignment);
 166       _special = true;
 167     } else {
 168       // failed; try to reserve regular memory below
 169       if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 170                             !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 171         log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 172       }
 173     }
 174   }
 175   int fd = -1;
 176   if (AllocateOldGenAt == NULL && _fd_for_heap != -1) {
 177     // AllocateHeapAt is in use.
 178     fd = _fd_for_heap;
 179   }
 180   if (base == NULL) {
  181     // Optimistically assume that the OS returns an aligned base pointer.
 182     // When reserving a large address range, most OSes seem to align to at
 183     // least 64K.
 184 
 185     // If the memory was requested at a particular address, use
 186     // os::attempt_reserve_memory_at() to avoid over mapping something
 187     // important.  If available space is not detected, return NULL.
 188 
 189     if (requested_address != 0) {
 190       base = os::attempt_reserve_memory_at(size, requested_address, fd);
 191       if (failed_to_reserve_as_requested(base, requested_address, size, false, fd != -1)) {
 192         // OS ignored requested address. Try different address.
 193         base = NULL;
 194       }
 195     } else {
 196       if (_nvdimm_base_nv != NULL && _fd_for_heap != -1) {
 197         base = os::reserve_memory(_dram_size, _nvdimm_base_nv, alignment, fd);
 198       } else {
 199         base = os::reserve_memory(size, NULL, alignment, fd);
 200       }
 201     }
 202 
 203     if (base == NULL) return;
 204 
 205     // Check alignment constraints
 206     if ((((size_t)base) & (alignment - 1)) != 0) {
 207       // Base not aligned, retry
 208       unmap_or_release_memory(base, size, fd != -1 /*is_file_mapped*/);
 209 
 210       // Make sure that size is aligned
 211       size = align_up(size, alignment);
 212       base = os::reserve_memory_aligned(size, alignment, fd);
 213 
 214       if (requested_address != 0 &&
 215           failed_to_reserve_as_requested(base, requested_address, size, false, fd != -1)) {
 216         // As a result of the alignment constraints, the allocated base differs
 217         // from the requested address. Return back to the caller who can
 218         // take remedial action (like try again without a requested address).
 219         assert(_base == NULL, "should be");
 220         return;
 221       }
 222     }
 223   }
 224   // Done
 225   _base = base;
  226   _nvdimm_base = _base - _nvdimm_size;
 227   _nvdimm_base_nv = NULL;
 228   _dram_size = (size_t)size;
 229   _size = size;
 230   _alignment = alignment;
  231   // If the heap is reserved with a backing file, the entire space has been committed, so set the _special flag to true.
 232   if (fd != -1) {
 233     _special = true;
 234   }
 235 }
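A short summary of the fd handling in initialize() above, restating what the code does rather than adding behaviour:

//   fd == _fd_for_heap only when AllocateHeapAt is in use (AllocateOldGenAt == NULL);
//   the reservation is then file-mapped and _special is set, because the backing
//   file commits the whole space up front.  With AllocateOldGenAt, fd stays -1 in
//   this function and the NVDIMM placement is carried by _nvdimm_base/_nvdimm_size
//   instead.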
 236 
 237 
 238 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
 239                              bool special, bool executable) {
 240   assert((size % os::vm_allocation_granularity()) == 0,
 241          "size not allocation aligned");
 242   _base = base;
 243   _size = size;
 244   _nvdimm_base = NULL; 
 245   _nvdimm_base_nv = NULL;
 246   _dram_size = (size_t)size;
 247   _alignment = alignment;
 248   _noaccess_prefix = 0;
 249   _special = special;
 250   _executable = executable;
 251 }
 252 

 253 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
 254                                         bool split, bool realloc) {
 255   assert(partition_size <= size(), "partition failed");
 256   if (split) {
 257     os::split_reserved_memory(base(), size(), partition_size, realloc);
 258   }
 259   ReservedSpace result(base(), partition_size, alignment, special(),
 260                        executable());
 261   return result;
 262 }
 263 
 264 
 265 ReservedSpace
 266 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
 267   assert(partition_size <= size(), "partition failed");
 268   ReservedSpace result(base() + partition_size, size() - partition_size,
 269                        alignment, special(), executable());
 270   return result;
 271 }
 272 


 278 
 279 size_t ReservedSpace::page_align_size_down(size_t size) {
 280   return align_down(size, os::vm_page_size());
 281 }
 282 
 283 
 284 size_t ReservedSpace::allocation_align_size_up(size_t size) {
 285   return align_up(size, os::vm_allocation_granularity());
 286 }
 287 
 288 
 289 size_t ReservedSpace::allocation_align_size_down(size_t size) {
 290   return align_down(size, os::vm_allocation_granularity());
 291 }
 292 
 293 
 294 void ReservedSpace::release() {
 295   if (is_reserved()) {
 296     char *real_base = _base - _noaccess_prefix;
 297     const size_t real_size = _size + _noaccess_prefix;
  298     // Unmap the NVDIMM-backed part of the heap, if any.
 299     if (_nvdimm_base != NULL) {
 300       os::unmap_memory(_nvdimm_base, _nvdimm_size);
 301     }
 302     if (special()) {
 303       if (_fd_for_heap != -1) {
 304         os::unmap_memory(real_base, real_size);
 305       } else {
 306         os::release_memory_special(real_base, real_size);
 307       }
  308     } else {
 309       os::release_memory(real_base, real_size);
 310     }
 311     _base = NULL;
 312     _nvdimm_base = NULL;
 313     _nvdimm_base_nv = NULL;
 314     _dram_size = 0;
 315     _nvdimm_size = 0;
 316     _size = 0;
 317     _noaccess_prefix = 0;
 318     _alignment = 0;
 319     _special = false;
 320     _executable = false;
 321   }
 322 }
 323 
 324 static size_t noaccess_prefix_size(size_t alignment) {
 325   return lcm(os::vm_page_size(), alignment);
 326 }
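A quick worked example of noaccess_prefix_size(); the page sizes and alignment are assumed values for illustration:

//   vm_page_size() = 4K,  alignment = 4M  ->  lcm(4K, 4M)  = 4M
//   vm_page_size() = 64K, alignment = 4M  ->  lcm(64K, 4M) = 4M
// The prefix is always a whole number of pages and of alignment units, which is
// what establish_noaccess_prefix() below relies on when it shifts _base and
// shrinks _size.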
 327 
 328 void ReservedHeapSpace::establish_noaccess_prefix() {
 329   assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
 330   _noaccess_prefix = noaccess_prefix_size(_alignment);
 331 
 332   if (base() && base() + _size > (char *)OopEncodingHeapMax) {
 333     if (true
 334         WIN64_ONLY(&& !UseLargePages)
 335         AIX_ONLY(&& os::vm_page_size() != 64*K)) {
 336       // Protect memory at the base of the allocated region.
 337       // If special, the page was committed (only matters on windows)
 338       if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
 339         fatal("cannot protect protection page");
 340       }
 341       log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
 342                                  PTR_FORMAT " / " INTX_FORMAT " bytes",
 343                                  p2i(_base),
 344                                  _noaccess_prefix);
 345       assert(Universe::narrow_oop_use_implicit_null_checks() == true, "not initialized?");
 346     } else {
 347       Universe::set_narrow_oop_use_implicit_null_checks(false);
 348     }
 349   }
 350 
 351   _base += _noaccess_prefix;
 352   _size -= _noaccess_prefix;
 353   assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
 354 }
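To make the pointer adjustment above concrete, a sketch with made-up numbers (the addresses and sizes are assumptions, not output of this code):

//   Before: _base = 0x0000000800000000, _size = 4G + 4M, _noaccess_prefix = 4M
//   The first 4M are protected with MEM_PROT_NONE, then:
//     _base += _noaccess_prefix;   // usable heap now starts at 0x0000000800400000
//     _size -= _noaccess_prefix;   // usable heap size is back to 4G
//   Decoding a null narrow oop now lands in the protected range, so the VM can
//   keep using implicit null checks for compressed oops.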
 355 
 356 // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
 357 // Does not check whether the reserved memory actually is at requested_address, as the memory returned
 358 // might still fulfill the wishes of the caller.
 359 // Assures the memory is aligned to 'alignment'.
  360 // NOTE: If ReservedHeapSpace already points to some reserved memory, this is freed first.
 361 void ReservedHeapSpace::try_reserve_heap(size_t size,
 362                                          size_t alignment,
 363                                          bool large,
 364                                          char* requested_address) {
 365   if (_base != NULL) {
 366     // We tried before, but we didn't like the address delivered.
 367     release();
 368   }
 369 
 370   if (_fd_for_heap != -1 && UseG1GC && AllocateOldGenAt != NULL) {
 371     char* base_nv = os::reserve_memory(size, requested_address, alignment);
 372     initialize_g1gc_nvdimm_dram_sizes(size, alignment);
  373     _nvdimm_base_nv = base_nv + _nvdimm_size; // Hint for the allocation address of the DRAM (compressed oops) heap.
 374   }
 375 
 376   
 377   // If OS doesn't support demand paging for large page memory, we need
 378   // to use reserve_memory_special() to reserve and pin the entire region.
 379   // If there is a backing file directory for this space then whether
 380   // large pages are allocated is up to the filesystem of the backing file.
 381   // So we ignore the UseLargePages flag in this case.
 382   bool special = large && !os::can_commit_large_page_memory();
 383   if (special && _fd_for_heap != -1) {
 384     special = false;
 385     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 386                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 387       log_debug(gc, heap)("Cannot allocate large pages for Java Heap when AllocateHeapAt option is set.");
 388     }
 389   }
 390   char* base = NULL;
 391   char* nvdimm_base = NULL;
 392 
 393   log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
 394                              " heap of size " SIZE_FORMAT_HEX,
 395                              p2i(requested_address),
 396                              size);
 397 
 398   if (special) {
 399     base = os::reserve_memory_special(size, alignment, requested_address, false);
 400 
 401     if (base != NULL) {
 402       // Check alignment constraints.
 403       assert((uintptr_t) base % alignment == 0,
 404              "Large pages returned a non-aligned address, base: "
 405              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 406              p2i(base), alignment);
 407       _special = true;
 408     }
 409   }
 410 
 411   if (base == NULL) {
 412     // Failed; try to reserve regular memory below
 413     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 414                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 415       log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 416     }
 417 
  418     // Optimistically assume that the OS returns an aligned base pointer.
 419     // When reserving a large address range, most OSes seem to align to at
 420     // least 64K.
 421 
 422     // If the memory was requested at a particular address, use
 423     // os::attempt_reserve_memory_at() to avoid over mapping something
 424     // important.  If available space is not detected, return NULL.
 425 
 426     if (requested_address != 0) {
 427       if (_nvdimm_base_nv != NULL && _fd_for_heap != -1 && AllocateOldGenAt != NULL) {
  428         // Unmap the DRAM slice of the placeholder reservation first, so the OS can reserve it again at this address.
 429         os::unmap_memory(_nvdimm_base_nv, _dram_size);
 430         base = os::attempt_reserve_memory_at(_dram_size, _nvdimm_base_nv);
 431       } else {
 432         base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
 433       }
 434     } else {
 435       if (_nvdimm_base_nv != NULL && _fd_for_heap != -1 && AllocateOldGenAt != NULL) {
  436         // Unmap the DRAM slice of the placeholder reservation first, so the OS can reserve it again at this address.
 437         os::unmap_memory(_nvdimm_base_nv, _dram_size);
 438         base = os::reserve_memory(_dram_size, _nvdimm_base_nv, alignment);
 439       } else {
 440         base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
 441       }
 442     }
 443   }
 444   if (base == NULL) { return; }
 445 
 446   // Done
 447   _base = base;
  448   _nvdimm_base = _base - _nvdimm_size;
 449   if (_nvdimm_base_nv != NULL && _fd_for_heap != -1 && AllocateOldGenAt != NULL) {
 450     _size = _dram_size;
 451   } else {
 452     _size = size;
 453   }
 454   _alignment = alignment;
 455 
  456   // If the heap is reserved with a backing file, the entire space has been committed, so set the _special flag to true.
 457   if (_fd_for_heap != -1 && AllocateOldGenAt == NULL) {
 458     _special = true;
 459   }
 460 
 461   // Check alignment constraints
 462   if ((((size_t)base) & (alignment - 1)) != 0) {
 463     // Base not aligned, retry.
 464     release();
 465   }
 466 }
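The reserve/unmap/re-reserve sequence used for the DRAM part of the heap above is easy to misread; the following sketch of the address flow uses only names that appear in the code, with the numbering added for illustration:

//   1. base_nv = os::reserve_memory(size, requested_address, alignment)
//        -> one anonymous mapping covering the whole NVDIMM + DRAM range.
//   2. _nvdimm_base_nv = base_nv + _nvdimm_size
//        -> address hint for where the DRAM (young gen) part should end up.
//   3. os::unmap_memory(_nvdimm_base_nv, _dram_size)
//        -> free that slice so it can be reserved again at the same address.
//   4. base = os::attempt_reserve_memory_at(_dram_size, _nvdimm_base_nv)
//        (or os::reserve_memory() with the same hint when no address was requested)
//   5. _nvdimm_base = _base - _nvdimm_size, i.e. the old gen range sits directly
//      below the DRAM part.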
 467 
 468 void ReservedHeapSpace::try_reserve_range(char *highest_start,
 469                                           char *lowest_start,
 470                                           size_t attach_point_alignment,
 471                                           char *aligned_heap_base_min_address,
 472                                           char *upper_bound,
 473                                           size_t size,


 635     char** addresses = get_attach_addresses_for_disjoint_mode();
 636     int i = 0;
 637     while (addresses[i] &&                                 // End of array not yet reached.
 638            ((_base == NULL) ||                             // No previous try succeeded.
 639             (_base + size >  (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
 640              !Universe::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
 641       char* const attach_point = addresses[i];
 642       assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
 643       try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
 644       i++;
 645     }
 646 
 647     // Last, desperate try without any placement.
 648     if (_base == NULL) {
 649       log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix);
 650       initialize(size + noaccess_prefix, alignment, large, NULL, false);
 651     }
 652   }
 653 }
 654 
 655 void ReservedHeapSpace::initialize_g1gc_nvdimm_dram_sizes(size_t size, size_t alignment) {
  656   _dram_size = (size_t)((size * G1MaxNewSizePercent) / 100);
  657   size_t page_sz = os::vm_page_size() - 1;
  658   _dram_size = (_dram_size + page_sz) & (~page_sz);     // round up to a full page
  659   // Align both sizes down to the heap alignment.
  660   _dram_size = align_down(_dram_size, alignment);
  661   _nvdimm_size = size - _dram_size;
  662   _nvdimm_size = (_nvdimm_size + page_sz) & (~page_sz); // round up to a full page
  663   _nvdimm_size = align_down(_nvdimm_size, alignment);
 664 }
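For illustration, a stand-alone sketch of the split that initialize_g1gc_nvdimm_dram_sizes() computes; the heap size, G1MaxNewSizePercent, page size and alignment are assumed example values, and all constants and variables below are local to the sketch:

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t M = 1024ull * 1024;
  const uint64_t G = 1024ull * M;
  uint64_t size      = 8 * G;        // -Xmx (assumed)
  uint64_t percent   = 60;           // G1MaxNewSizePercent (assumed)
  uint64_t page_mask = 4096 - 1;     // os::vm_page_size() - 1 (assumed 4K pages)
  uint64_t alignment = 2 * M;        // heap alignment (assumed)

  uint64_t dram = (size * percent) / 100;
  dram = (dram + page_mask) & ~page_mask;   // round up to a full page
  dram = dram & ~(alignment - 1);           // align_down to the heap alignment
  uint64_t nvdimm = size - dram;
  nvdimm = (nvdimm + page_mask) & ~page_mask;
  nvdimm = nvdimm & ~(alignment - 1);

  // Prints: dram = 4914 M, nvdimm = 3278 M
  printf("dram = %llu M, nvdimm = %llu M\n",
         (unsigned long long)(dram / M), (unsigned long long)(nvdimm / M));
  return 0;
}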
 665 
 666 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* heap_allocation_directory) : ReservedSpace() {
 667 
 668   if (size == 0) {
 669     return;
 670   }
 671 
  672   // If AllocateOldGenAt is specified, create the backing file for the old generation.
 673   if (AllocateOldGenAt != NULL) {
 674     _fd_for_heap = os::create_file_for_heap(AllocateOldGenAt);
  675     if (_fd_for_heap == -1) {
 676       vm_exit_during_initialization(
 677         err_msg("Could not create file for Heap at location %s", AllocateOldGenAt));
 678     }
 679     if (UseParallelOldGC) {
  680       // For ParallelOldGC, adaptive sizing picks the _old_gen virtual space sizes as needed.
  681       // Allocate Xmx on NVDIMM up front, as adaptive sizing may put a lot of pressure on NVDIMM.
 682       os::allocate_file(_fd_for_heap, MaxHeapSize);
 683       os::set_nvdimm_fd(_fd_for_heap);
 684       os::set_nvdimm_present(true);
 685     }
 686   } else {
 687     _fd_for_heap = -1;
 688   }
 689 
 690   if (heap_allocation_directory != NULL) {
 691     _fd_for_heap = os::create_file_for_heap(heap_allocation_directory);
 692     if (_fd_for_heap == -1) {
 693       vm_exit_during_initialization(
 694         err_msg("Could not create file for Heap at location %s", heap_allocation_directory));
 695     }
 696   }
 697 
 698   // Heap size should be aligned to alignment, too.
 699   guarantee(is_aligned(size, alignment), "set by caller");
 700 
 701   char* base_nv = NULL;
 702   _nvdimm_base_nv = NULL;
 703   
 704   if (_fd_for_heap != -1 && UseG1GC && AllocateOldGenAt != NULL) {
 705     if (!UseCompressedOops) {
  706       // With compressed oops the requested address is handled by initialize_compressed_heap() below.
  707       initialize_g1gc_nvdimm_dram_sizes(size, alignment);
  708       base_nv = os::reserve_memory(size, NULL, alignment);
  709       _nvdimm_base_nv = base_nv + _nvdimm_size; // Hint for the allocation address of the DRAM heap.
 710     }
 711   }
 712 
 713   if (UseCompressedOops) {
 714     initialize_compressed_heap(size, alignment, large);
 715     if (_size > size) {
 716       // We allocated heap with noaccess prefix.
 717       // It can happen we get a zerobased/unscaled heap with noaccess prefix,
 718       // if we had to try at arbitrary address.
 719       establish_noaccess_prefix();
 720     }
 721   } else {
 722     if (_fd_for_heap != -1 && UseG1GC && AllocateOldGenAt != NULL) {
 723       initialize(_dram_size, alignment, large, NULL, false);
 724     } else {
 725       initialize(size, alignment, large, NULL, false);
 726     }
 727   }
 728 
 729   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
 730          "area must be distinguishable from marks for mark-sweep");
 731   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
 732          "area must be distinguishable from marks for mark-sweep");
 733 
 734   if (base() != NULL) {
 735     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
 736     if (_fd_for_heap != -1 && UseG1GC && AllocateOldGenAt != NULL) {
 737       os::set_nvdimm_present(true);
 738       os::set_dram_heapbase((address)_base);
 739       os::set_nvdimm_heapbase((address)_nvdimm_base);
 740       os::set_nvdimm_fd(_fd_for_heap);
 741       _size += _nvdimm_size;
 742       _base = _nvdimm_base;
  743       log_info(gc, heap)("Java DRAM heap at [" PTR_FORMAT " - " PTR_FORMAT "] and NVDIMM old gen at [" PTR_FORMAT " - " PTR_FORMAT "], total size " SIZE_FORMAT, p2i(_nvdimm_base + _nvdimm_size), p2i(_nvdimm_base + _nvdimm_size + _dram_size), p2i(_nvdimm_base), p2i(_nvdimm_base + _nvdimm_size), size);
 744     }
 745   }
 746 
 747   if (_fd_for_heap != -1 && AllocateOldGenAt == NULL) {
 748     os::close(_fd_for_heap);
 749   }
 750 }
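As a reading aid, the heap layout when the G1 NVDIMM/DRAM split is active, reconstructed from the assignments and the log message above (a sketch of the behaviour in this change, not a new layout):

//   _base == _nvdimm_base                      _nvdimm_base + _nvdimm_size
//   |                                          |
//   [ NVDIMM-backed old gen, _nvdimm_size      | DRAM-backed young gen, _dram_size ]
//   _size = _nvdimm_size + _dram_size
// When the split is not active, _base and _size describe a single contiguous
// DRAM or file-backed range, as in the non-NVDIMM paths above.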
 751 
  752 // Reserve space for the code segment.  Same as the Java heap, except that we
  753 // mark this space as executable.
 754 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
 755                                      size_t rs_align,
 756                                      bool large) :
 757   ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
 758   MemTracker::record_virtual_memory_type((address)base(), mtCode);
 759 }
 760 
 761 // VirtualSpace
 762 
 763 VirtualSpace::VirtualSpace() {




  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "logging/log.hpp"
  27 #include "memory/resourceArea.hpp"
  28 #include "memory/virtualspace.hpp"
  29 #include "oops/markOop.hpp"
  30 #include "oops/oop.inline.hpp"
  31 #include "runtime/os.inline.hpp"
  32 #include "services/memTracker.hpp"
  33 #include "utilities/align.hpp"
  34 
  35 // ReservedSpace
  36 
  37 // Dummy constructor
  38 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),

  39     _alignment(0), _special(false), _executable(false), _fd_for_heap(-1) {
  40 }
  41 
  42 ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1) {

  43   bool has_preferred_page_size = preferred_page_size != 0;
  44   // Want to use large pages where possible and pad with small pages.
  45   size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
  46   bool large_pages = page_size != (size_t)os::vm_page_size();
  47   size_t alignment;
  48   if (large_pages && has_preferred_page_size) {
  49     alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
  50     // ReservedSpace initialization requires size to be aligned to the given
  51     // alignment. Align the size up.
  52     size = align_up(size, alignment);
  53   } else {
  54     // Don't force the alignment to be large page aligned,
  55     // since that will waste memory.
  56     alignment = os::vm_allocation_granularity();
  57   }
  58   initialize(size, alignment, large_pages, NULL, false);
  59 }
  60 
  61 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  62                              bool large,
  63                              char* requested_address) : _fd_for_heap(-1) {

  64   initialize(size, alignment, large, requested_address, false);
  65 }
  66 
  67 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  68                              bool large,
  69                              bool executable) : _fd_for_heap(-1) {

  70   initialize(size, alignment, large, NULL, executable);
  71 }
  72 
  73 // Helper method
  74 static void unmap_or_release_memory(char* base, size_t size, bool is_file_mapped) {
  75   if (is_file_mapped) {
  76     if (!os::unmap_memory(base, size)) {
  77       fatal("os::unmap_memory failed");
  78     }
  79   } else if (!os::release_memory(base, size)) {
  80     fatal("os::release_memory failed");
  81   }
  82 }
  83 
  84 // Helper method.
  85 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
  86                                            const size_t size, bool special, bool is_file_mapped = false)
  87 {
  88   if (base == requested_address || requested_address == NULL)
  89     return false; // did not fail


 126   _noaccess_prefix = 0;
 127   if (size == 0) {
 128     return;
 129   }
 130 
 131   // If OS doesn't support demand paging for large page memory, we need
 132   // to use reserve_memory_special() to reserve and pin the entire region.
 133   // If there is a backing file directory for this space then whether
 134   // large pages are allocated is up to the filesystem of the backing file.
 135   // So we ignore the UseLargePages flag in this case.
 136   bool special = large && !os::can_commit_large_page_memory();
 137   if (special && _fd_for_heap != -1) {
 138     special = false;
 139     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 140       !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 141       log_debug(gc, heap)("Ignoring UseLargePages since large page support is up to the file system of the backing file for Java heap");
 142     }
 143   }
 144 
 145   char* base = NULL;

 146 
 147   if (special) {
 148 
 149     base = os::reserve_memory_special(size, alignment, requested_address, executable);
 150 
 151     if (base != NULL) {
 152       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
 153         // OS ignored requested address. Try different address.
 154         return;
 155       }
 156       // Check alignment constraints.
 157       assert((uintptr_t) base % alignment == 0,
 158              "Large pages returned a non-aligned address, base: "
 159              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 160              p2i(base), alignment);
 161       _special = true;
 162     } else {
 163       // failed; try to reserve regular memory below
 164       if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 165                             !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 166         log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 167       }
 168     }
 169   }
 170 




 171   if (base == NULL) {
  172     // Optimistically assume that the OS returns an aligned base pointer.
 173     // When reserving a large address range, most OSes seem to align to at
 174     // least 64K.
 175 
 176     // If the memory was requested at a particular address, use
 177     // os::attempt_reserve_memory_at() to avoid over mapping something
 178     // important.  If available space is not detected, return NULL.
 179 
 180     if (requested_address != 0) {
 181       base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
 182       if (failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
 183         // OS ignored requested address. Try different address.
 184         base = NULL;
 185       }
 186     } else {
 187       base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);




 188     }
 189 
 190     if (base == NULL) return;
 191 
 192     // Check alignment constraints
 193     if ((((size_t)base) & (alignment - 1)) != 0) {
 194       // Base not aligned, retry
 195       unmap_or_release_memory(base, size, _fd_for_heap != -1 /*is_file_mapped*/);
 196 
 197       // Make sure that size is aligned
 198       size = align_up(size, alignment);
 199       base = os::reserve_memory_aligned(size, alignment, _fd_for_heap);
 200 
 201       if (requested_address != 0 &&
 202           failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
 203         // As a result of the alignment constraints, the allocated base differs
 204         // from the requested address. Return back to the caller who can
 205         // take remedial action (like try again without a requested address).
 206         assert(_base == NULL, "should be");
 207         return;
 208       }
 209     }
 210   }
 211   // Done
 212   _base = base;



 213   _size = size;
 214   _alignment = alignment;
  215   // If the heap is reserved with a backing file, the entire space has been committed, so set the _special flag to true.
 216   if (_fd_for_heap != -1 && AllocateOldGenAt == NULL) {
 217     _special = true;
 218   }
 219 }
 220 
 221 
 222 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
 223                              bool special, bool executable) {
 224   assert((size % os::vm_allocation_granularity()) == 0,
 225          "size not allocation aligned");
 226   _base = base;
 227   _size = size;



 228   _alignment = alignment;
 229   _noaccess_prefix = 0;
 230   _special = special;
 231   _executable = executable;
 232 }
 233 
 234 
 235 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
 236                                         bool split, bool realloc) {
 237   assert(partition_size <= size(), "partition failed");
 238   if (split) {
 239     os::split_reserved_memory(base(), size(), partition_size, realloc);
 240   }
 241   ReservedSpace result(base(), partition_size, alignment, special(),
 242                        executable());
 243   return result;
 244 }
 245 
 246 
 247 ReservedSpace
 248 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
 249   assert(partition_size <= size(), "partition failed");
 250   ReservedSpace result(base() + partition_size, size() - partition_size,
 251                        alignment, special(), executable());
 252   return result;
 253 }
 254 


 260 
 261 size_t ReservedSpace::page_align_size_down(size_t size) {
 262   return align_down(size, os::vm_page_size());
 263 }
 264 
 265 
 266 size_t ReservedSpace::allocation_align_size_up(size_t size) {
 267   return align_up(size, os::vm_allocation_granularity());
 268 }
 269 
 270 
 271 size_t ReservedSpace::allocation_align_size_down(size_t size) {
 272   return align_down(size, os::vm_allocation_granularity());
 273 }
 274 
 275 
 276 void ReservedSpace::release() {
 277   if (is_reserved()) {
 278     char *real_base = _base - _noaccess_prefix;
 279     const size_t real_size = _size + _noaccess_prefix;




 280     if (special()) {
 281       if (_fd_for_heap != -1) {
 282         os::unmap_memory(real_base, real_size);
 283       } else {
 284         os::release_memory_special(real_base, real_size);
 285       }
  286     } else {
 287       os::release_memory(real_base, real_size);
 288     }
 289     _base = NULL;




 290     _size = 0;
 291     _noaccess_prefix = 0;
 292     _alignment = 0;
 293     _special = false;
 294     _executable = false;
 295   }
 296 }
 297 
 298 static size_t noaccess_prefix_size(size_t alignment) {
 299   return lcm(os::vm_page_size(), alignment);
 300 }
 301 
 302 void ReservedHeapSpace::establish_noaccess_prefix() {
 303   assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
 304   _noaccess_prefix = noaccess_prefix_size(_alignment);
 305 
 306   if (base() && base() + _size > (char *)OopEncodingHeapMax) {
 307     if (true
 308         WIN64_ONLY(&& !UseLargePages)
 309         AIX_ONLY(&& os::vm_page_size() != 64*K)) {
 310       // Protect memory at the base of the allocated region.
 311       // If special, the page was committed (only matters on windows)
 312       if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
 313         fatal("cannot protect protection page");
 314       }
 315       log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
 316                                  PTR_FORMAT " / " INTX_FORMAT " bytes",
 317                                  p2i(_base),
 318                                  _noaccess_prefix);
 319       assert(Universe::narrow_oop_use_implicit_null_checks() == true, "not initialized?");
 320     } else {
 321       Universe::set_narrow_oop_use_implicit_null_checks(false);
 322     }
 323   }

 324   _base += _noaccess_prefix;
 325   _size -= _noaccess_prefix;
 326   assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
 327 }
 328 
 329 // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
 330 // Does not check whether the reserved memory actually is at requested_address, as the memory returned
 331 // might still fulfill the wishes of the caller.
 332 // Assures the memory is aligned to 'alignment'.
  333 // NOTE: If ReservedHeapSpace already points to some reserved memory, this is freed first.
 334 void ReservedHeapSpace::try_reserve_heap(size_t size,
 335                                          size_t alignment,
 336                                          bool large,
 337                                          char* requested_address) {
 338   if (_base != NULL) {
 339     // We tried before, but we didn't like the address delivered.
 340     release();
 341   }
 342 







 343   // If OS doesn't support demand paging for large page memory, we need
 344   // to use reserve_memory_special() to reserve and pin the entire region.
 345   // If there is a backing file directory for this space then whether
 346   // large pages are allocated is up to the filesystem of the backing file.
 347   // So we ignore the UseLargePages flag in this case.
 348   bool special = large && !os::can_commit_large_page_memory();
 349   if (special && _fd_for_heap != -1) {
 350     special = false;
 351     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 352                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 353       log_debug(gc, heap)("Cannot allocate large pages for Java Heap when AllocateHeapAt option is set.");
 354     }
 355   }
 356   char* base = NULL;

 357 
 358   log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
 359                              " heap of size " SIZE_FORMAT_HEX,
 360                              p2i(requested_address),
 361                              size);
 362 
 363   if (special) {
 364     base = os::reserve_memory_special(size, alignment, requested_address, false);
 365 
 366     if (base != NULL) {
 367       // Check alignment constraints.
 368       assert((uintptr_t) base % alignment == 0,
 369              "Large pages returned a non-aligned address, base: "
 370              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 371              p2i(base), alignment);
 372       _special = true;
 373     }
 374   }
 375 
 376   if (base == NULL) {
 377     // Failed; try to reserve regular memory below
 378     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 379                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 380       log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 381     }
 382 
  383     // Optimistically assume that the OS returns an aligned base pointer.
 384     // When reserving a large address range, most OSes seem to align to at
 385     // least 64K.
 386 
 387     // If the memory was requested at a particular address, use
 388     // os::attempt_reserve_memory_at() to avoid over mapping something
 389     // important.  If available space is not detected, return NULL.
 390 
 391     if (requested_address != 0) {





 392       base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);






 393     } else {
 394       base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
 395     }
 396   }

 397   if (base == NULL) { return; }
 398 
 399   // Done
 400   _base = base;




 401   _size = size;

 402   _alignment = alignment;
 403 
 404   // If heap is reserved with a backing file, the entire space has been committed. So set the _special flag to true
 405   if (_fd_for_heap != -1 && AllocateOldGenAt == NULL) {
 406     _special = true;
 407   }
 408 
 409   // Check alignment constraints
 410   if ((((size_t)base) & (alignment - 1)) != 0) {
 411     // Base not aligned, retry.
 412     release();
 413   }
 414 }
 415 
 416 void ReservedHeapSpace::try_reserve_range(char *highest_start,
 417                                           char *lowest_start,
 418                                           size_t attach_point_alignment,
 419                                           char *aligned_heap_base_min_address,
 420                                           char *upper_bound,
 421                                           size_t size,


 583     char** addresses = get_attach_addresses_for_disjoint_mode();
 584     int i = 0;
 585     while (addresses[i] &&                                 // End of array not yet reached.
 586            ((_base == NULL) ||                             // No previous try succeeded.
 587             (_base + size >  (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
 588              !Universe::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
 589       char* const attach_point = addresses[i];
 590       assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
 591       try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
 592       i++;
 593     }
 594 
 595     // Last, desperate try without any placement.
 596     if (_base == NULL) {
 597       log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix);
 598       initialize(size + noaccess_prefix, alignment, large, NULL, false);
 599     }
 600   }
 601 }
 602 











 603 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* heap_allocation_directory) : ReservedSpace() {
 604 
 605   if (size == 0) {
 606     return;
 607   }
 608 
 609   // Open AllocateOldGenAt file
 610   if (AllocateOldGenAt != NULL) {
 611     _fd_for_heap = os::create_file_for_heap(AllocateOldGenAt);
  612     if (_fd_for_heap == -1) {
 613       vm_exit_during_initialization(
 614         err_msg("Could not create file for Heap at location %s", AllocateOldGenAt));
 615     }
 616     if (UseParallelOldGC) {
  617       // For ParallelOldGC, adaptive sizing picks the _old_gen virtual space sizes as needed.
  618       // Allocate Xmx on NVDIMM up front, as adaptive sizing may put a lot of pressure on NVDIMM.
 619       os::allocate_file(_fd_for_heap, MaxHeapSize);
 620       os::set_nvdimm_fd(_fd_for_heap);
 621       os::set_nvdimm_present(true);
 622     }
 623   } else {
 624     _fd_for_heap = -1;
 625   }
 626 
 627   if (heap_allocation_directory != NULL) {
 628     _fd_for_heap = os::create_file_for_heap(heap_allocation_directory);
 629     if (_fd_for_heap == -1) {
 630       vm_exit_during_initialization(
 631         err_msg("Could not create file for Heap at location %s", heap_allocation_directory));
 632     }
 633   }
 634 
 635   // Heap size should be aligned to alignment, too.
 636   guarantee(is_aligned(size, alignment), "set by caller");
 637 












 638   if (UseCompressedOops) {
 639     initialize_compressed_heap(size, alignment, large);
 640     if (_size > size) {
 641       // We allocated heap with noaccess prefix.
 642       // It can happen we get a zerobased/unscaled heap with noaccess prefix,
 643       // if we had to try at arbitrary address.
 644       establish_noaccess_prefix();
 645     }
 646   } else {



 647     initialize(size, alignment, large, NULL, false);
 648   }

 649 
 650   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
 651          "area must be distinguishable from marks for mark-sweep");
 652   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
 653          "area must be distinguishable from marks for mark-sweep");
 654 
 655   if (base() != NULL) {
 656     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
 657     if (AllocateOldGenAt != NULL && _fd_for_heap != -1) {
 658       os::set_nvdimm_present(true);
 659       os::set_nvdimm_heapbase((address)_base);

 660       os::set_nvdimm_fd(_fd_for_heap);



 661     }
 662   }
 663 
 664   if (_fd_for_heap != -1 && AllocateOldGenAt == NULL) {
 665     os::close(_fd_for_heap);
 666   }
 667 }
 668 
  669 // Reserve space for the code segment.  Same as the Java heap, except that we
  670 // mark this space as executable.
 671 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
 672                                      size_t rs_align,
 673                                      bool large) :
 674   ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
 675   MemTracker::record_virtual_memory_type((address)base(), mtCode);
 676 }
 677 
 678 // VirtualSpace
 679 
 680 VirtualSpace::VirtualSpace() {

