src/hotspot/share/memory/virtualspace.cpp

--- old/src/hotspot/share/memory/virtualspace.cpp

  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "logging/log.hpp"
  27 #include "memory/resourceArea.hpp"
  28 #include "memory/virtualspace.hpp"
  29 #include "oops/markOop.hpp"
  30 #include "oops/oop.inline.hpp"
  31 #include "runtime/os.inline.hpp"
  32 #include "services/memTracker.hpp"
  33 #include "utilities/align.hpp"
  34 
  35 // ReservedSpace
  36 
  37 // Dummy constructor
  38 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0), 
  39     _nvdimm_base_nv(NULL), _nvdimm_size(0), _dram_size(0),
  40     _alignment(0), _special(false), _executable(false), _fd_for_heap(-1), _fd_for_nvdimm(-1) {
  41 }
  42 
  43 ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1), _fd_for_nvdimm(-1), 
  44     _nvdimm_base_nv(NULL), _nvdimm_size(0), _dram_size(0) {
  45   bool has_preferred_page_size = preferred_page_size != 0;
  46   // Want to use large pages where possible and pad with small pages.
  47   size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
  48   bool large_pages = page_size != (size_t)os::vm_page_size();
  49   size_t alignment;
  50   if (large_pages && has_preferred_page_size) {
  51     alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
  52     // ReservedSpace initialization requires size to be aligned to the given
  53     // alignment. Align the size up.
  54     size = align_up(size, alignment);
  55   } else {
  56     // Don't force the alignment to be large page aligned,
  57     // since that will waste memory.
  58     alignment = os::vm_allocation_granularity();
  59   }
  60   initialize(size, alignment, large_pages, NULL, false);
  61 }
  62 
  63 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  64                              bool large,
  65                              char* requested_address) : _fd_for_heap(-1), _fd_for_nvdimm(-1), 
  66                              _nvdimm_base_nv(NULL), _nvdimm_size(0), _dram_size(0) {
  67   initialize(size, alignment, large, requested_address, false);
  68 }
  69 
  70 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  71                              bool large,
  72                              bool executable) : _fd_for_heap(-1), _fd_for_nvdimm(-1), 
  73                              _nvdimm_base_nv(NULL), _nvdimm_size(0), _dram_size(0) {
  74   initialize(size, alignment, large, NULL, executable);
  75 }
  76 
  77 // Helper method
  78 static void unmap_or_release_memory(char* base, size_t size, bool is_file_mapped) {
  79   if (is_file_mapped) {
  80     if (!os::unmap_memory(base, size)) {
  81       fatal("os::unmap_memory failed");
  82     }
  83   } else if (!os::release_memory(base, size)) {
  84     fatal("os::release_memory failed");
  85   }
  86 }
  87 
  88 // Helper method.
  89 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
  90                                            const size_t size, bool special, bool is_file_mapped = false)
  91 {
  92   if (base == requested_address || requested_address == NULL)


 155 
 156     if (base != NULL) {
 157       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
 158         // OS ignored requested address. Try different address.
 159         return;
 160       }
 161       // Check alignment constraints.
 162       assert((uintptr_t) base % alignment == 0,
 163              "Large pages returned a non-aligned address, base: "
 164              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 165              p2i(base), alignment);
 166       _special = true;
 167     } else {
  168       // Failed; try to reserve regular memory below
 169       if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 170                             !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 171         log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 172       }
 173     }
 174   }
 175 




 176   if (base == NULL) {
  177     // Optimistically assume that the OS returns an aligned base pointer.
 178     // When reserving a large address range, most OSes seem to align to at
 179     // least 64K.
 180 
 181     // If the memory was requested at a particular address, use
  182     // os::attempt_reserve_memory_at() to avoid mapping over something
 183     // important.  If available space is not detected, return NULL.
 184 
 185     if (requested_address != 0) {
 186       base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
 187       if (failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
 188         // OS ignored requested address. Try different address.
 189         base = NULL;
 190       }
 191     } else {
 192       if (_nvdimm_base_nv != NULL && _fd_for_nvdimm != -1) {
 193         base = os::reserve_memory(_dram_size, _nvdimm_base_nv, alignment, _fd_for_heap);
 194       } else {
 195         base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
 196       }
 197     }
 198 
 199     if (base == NULL) return;
 200 
 201     // Check alignment constraints
 202     if ((((size_t)base) & (alignment - 1)) != 0) {
 203       // Base not aligned, retry
 204       unmap_or_release_memory(base, size, _fd_for_heap != -1 /*is_file_mapped*/);
 205 
 206       // Make sure that size is aligned
 207       size = align_up(size, alignment);
 208       base = os::reserve_memory_aligned(size, alignment, _fd_for_heap);
 209 
 210       if (requested_address != 0 &&
 211           failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
 212         // As a result of the alignment constraints, the allocated base differs
  213       // from the requested address. Return to the caller, who can
 214         // take remedial action (like try again without a requested address).
 215         assert(_base == NULL, "should be");
 216         return;
 217       }
 218     }
 219   }
 220   // Done
 221   _base = base;
  222   _nvdimm_base = _base - _nvdimm_size;
 223   _nvdimm_base_nv = NULL;
 224   _dram_size = (size_t)size;
 225   _size = size;
 226   _alignment = alignment;
  227   // If the heap is reserved with a backing file, the entire space has been committed, so set the _special flag to true.
 228   if (_fd_for_heap != -1) {
 229     _special = true;
 230   }
 231 }
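
Note (editorial, not part of the change): the retry path above follows a common reservation pattern -- reserve, test alignment, release, then re-reserve with an explicit alignment guarantee. A minimal standalone sketch of the same idea using POSIX mmap (illustrative only; HotSpot's os::reserve_memory_aligned differs in detail):

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>

    // Reserve 'size' bytes aligned to 'alignment' (a power of two) by
    // over-reserving and trimming the misaligned head and unused tail.
    static char* reserve_aligned(size_t size, size_t alignment) {
      size_t padded = size + alignment;
      char* raw = (char*)mmap(NULL, padded, PROT_NONE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (raw == MAP_FAILED) return NULL;
      uintptr_t aligned = ((uintptr_t)raw + alignment - 1) & ~(alignment - 1);
      if (aligned > (uintptr_t)raw) {
        munmap(raw, aligned - (uintptr_t)raw);        // trim the head
      }
      size_t tail = (uintptr_t)raw + padded - (aligned + size);
      if (tail > 0) {
        munmap((char*)(aligned + size), tail);        // trim the tail
      }
      return (char*)aligned;
    }
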
 232 
 233 
 234 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
 235                              bool special, bool executable) {
 236   assert((size % os::vm_allocation_granularity()) == 0,
 237          "size not allocation aligned");
 238   _base = base;
 239   _size = size;
 240   _nvdimm_base = NULL; 
 241   _nvdimm_base_nv = NULL;
 242   _dram_size = (size_t)size;
 243   _alignment = alignment;
 244   _noaccess_prefix = 0;
 245   _special = special;
 246   _executable = executable;
 247 }
 248 


 275 size_t ReservedSpace::page_align_size_down(size_t size) {
 276   return align_down(size, os::vm_page_size());
 277 }
 278 
 279 
 280 size_t ReservedSpace::allocation_align_size_up(size_t size) {
 281   return align_up(size, os::vm_allocation_granularity());
 282 }
 283 
 284 
 285 size_t ReservedSpace::allocation_align_size_down(size_t size) {
 286   return align_down(size, os::vm_allocation_granularity());
 287 }
 288 
 289 
 290 void ReservedSpace::release() {
 291   if (is_reserved()) {
 292     char *real_base = _base - _noaccess_prefix;
 293     const size_t real_size = _size + _noaccess_prefix;
 294     // unmap nvdimm
 295     if (_fd_for_nvdimm != -1) {
  296       os::unmap_memory(real_base + real_size, _nvdimm_size);
 297     }
 298     if (special()) {
 299       if (_fd_for_heap != -1) {
 300         os::unmap_memory(real_base, real_size);
 301       } else {
 302         os::release_memory_special(real_base, real_size);
 303       }
  304     } else {
 305       os::release_memory(real_base, real_size);
 306     }
 307     _base = NULL;
 308     _nvdimm_base = NULL;
 309     _nvdimm_base_nv = NULL;
 310     _dram_size = 0;
 311     _nvdimm_size = 0;
 312     _size = 0;
 313     _noaccess_prefix = 0;
 314     _alignment = 0;
 315     _special = false;
 316     _executable = false;


 346 
 347   _base += _noaccess_prefix;
 348   _size -= _noaccess_prefix;
 349   assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
 350 }
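
Note (editorial): the noaccess prefix reserved below the usable heap exists so that decoding a null narrow (compressed) oop faults instead of reading live data. A minimal POSIX-style sketch of the idea, with illustrative names; this is not HotSpot's implementation:

    #include <stddef.h>
    #include <sys/mman.h>

    // Reserve 'prefix + size' bytes, keep the first 'prefix' bytes
    // inaccessible, and hand out the address just past the prefix as the
    // usable base.
    static char* reserve_with_noaccess_prefix(size_t size, size_t prefix) {
      char* raw = (char*)mmap(NULL, prefix + size, PROT_NONE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (raw == MAP_FAILED) return NULL;
      // Only the part after the prefix becomes usable; the prefix stays
      // PROT_NONE, so any access through it traps.
      if (mprotect(raw + prefix, size, PROT_READ | PROT_WRITE) != 0) {
        munmap(raw, prefix + size);
        return NULL;
      }
      return raw + prefix;
    }
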
 351 
 352 // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
 353 // Does not check whether the reserved memory actually is at requested_address, as the memory returned
 354 // might still fulfill the wishes of the caller.
  355 // Ensures the memory is aligned to 'alignment'.
  356 // NOTE: If ReservedHeapSpace already points to some reserved memory, it is freed first.
 357 void ReservedHeapSpace::try_reserve_heap(size_t size,
 358                                          size_t alignment,
 359                                          bool large,
 360                                          char* requested_address) {
 361   if (_base != NULL) {
 362     // We tried before, but we didn't like the address delivered.
 363     release();
 364   }
 365 
 366   if (_fd_for_nvdimm != -1 && UseG1GC) {
 367     char* base_nv = os::reserve_memory(size, requested_address, alignment);
 368     initialize_g1gc_nvdimm_dram_sizes(size, alignment);
  369     _nvdimm_base_nv = base_nv + _nvdimm_size; // Hint for the allocation address of the DRAM compressed heap.
 370   }
 371 

 372   // If OS doesn't support demand paging for large page memory, we need
 373   // to use reserve_memory_special() to reserve and pin the entire region.
 374   // If there is a backing file directory for this space then whether
 375   // large pages are allocated is up to the filesystem of the backing file.
 376   // So we ignore the UseLargePages flag in this case.
 377   bool special = large && !os::can_commit_large_page_memory();
 378   if (special && _fd_for_heap != -1) {
 379     special = false;
 380     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 381                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 382       log_debug(gc, heap)("Cannot allocate large pages for Java Heap when AllocateHeapAt option is set.");
 383     }
 384   }
 385   char* base = NULL;
 386   char* nvdimm_base = NULL;
 387 
 388   log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
 389                              " heap of size " SIZE_FORMAT_HEX,
 390                              p2i(requested_address),
 391                              size);


 402       _special = true;
 403     }
 404   }
 405 
 406   if (base == NULL) {
 407     // Failed; try to reserve regular memory below
 408     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 409                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 410       log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 411     }
 412 
  413     // Optimistically assume that the OS returns an aligned base pointer.
 414     // When reserving a large address range, most OSes seem to align to at
 415     // least 64K.
 416 
 417     // If the memory was requested at a particular address, use
  418     // os::attempt_reserve_memory_at() to avoid mapping over something
 419     // important.  If available space is not detected, return NULL.
 420 
 421     if (requested_address != 0) {
 422       if (_nvdimm_base_nv != NULL && _fd_for_nvdimm != -1) {
  423         // First unmap so that the OS does not keep trying.
 424         os::unmap_memory(_nvdimm_base_nv, _dram_size);
 425         base = os::attempt_reserve_memory_at(_dram_size, _nvdimm_base_nv);
 426       } else {
 427         base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
 428       }
 429     } else {
 430       if (_nvdimm_base_nv != NULL && _fd_for_nvdimm != -1) {
  431         // First unmap so that the OS does not keep trying.
 432         os::unmap_memory(_nvdimm_base_nv, _dram_size);
 433         base = os::reserve_memory(_dram_size, _nvdimm_base_nv, alignment);
 434       } else {
 435         base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
 436       }
 437     }
 438   }
 439   if (base == NULL) { return; }
 440 
 441   // Done
 442   _base = base;
  443   _nvdimm_base = _base - _nvdimm_size;
 444   if (_nvdimm_base_nv != NULL && _fd_for_nvdimm != -1) {
 445     _size = _dram_size;
 446   } else {
 447     _size = size;
 448   }
 449   _alignment = alignment;
 450 
  451   // If the heap is reserved with a backing file, the entire space has been committed, so set the _special flag to true.
 452   if (_fd_for_heap != -1) {
 453     _special = true;
 454   }
 455 
 456   // Check alignment constraints
 457   if ((((size_t)base) & (alignment - 1)) != 0) {
 458     // Base not aligned, retry.
 459     release();
 460   }
 461 }
 462 
 463 void ReservedHeapSpace::try_reserve_range(char *highest_start,
 464                                           char *lowest_start,
 465                                           size_t attach_point_alignment,
 466                                           char *aligned_heap_base_min_address,
 467                                           char *upper_bound,
 468                                           size_t size,
 469                                           size_t alignment,
 470                                           bool large) {
 471   const size_t attach_range = highest_start - lowest_start;
 472   // Cap num_attempts at possible number.


 649 
 650 void ReservedHeapSpace::initialize_g1gc_nvdimm_dram_sizes(size_t size, size_t alignment) {
 651   _dram_size = (size_t)((size * G1MaxNewSizePercent)/100);
  652   size_t page_sz = os::vm_page_size() - 1;
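        // (Added note) page_sz holds (page size - 1); adding it and then
        // masking with its complement rounds a size up to the next page boundary.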
 653   _dram_size = (_dram_size + page_sz) & (~page_sz);
 654   // align sizes.
 655   _dram_size = align_down(_dram_size, alignment);
 656   _nvdimm_size = size - _dram_size;
 657   _nvdimm_size = (_nvdimm_size + page_sz) & (~page_sz);
 658   _nvdimm_size = align_down(_nvdimm_size, alignment);
 659 }
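
Note (editorial): a quick worked instance of the split above, as a standalone check. The values are illustrative only; the percentage and 4 KiB page stand in for G1MaxNewSizePercent and os::vm_page_size(), and are not asserted defaults:

    #include <cstddef>
    #include <cstdio>

    int main() {
      size_t size      = 10u * 1024 * 1024;  // 10 MiB heap (example value)
      size_t alignment = 1u  * 1024 * 1024;  // 1 MiB (example value)
      size_t percent   = 60;                 // stands in for G1MaxNewSizePercent
      size_t page_sz   = 4096 - 1;           // 4 KiB page assumed

      size_t dram = (size * percent) / 100;  // 6 MiB
      dram = (dram + page_sz) & ~page_sz;    // round up to a page: still 6 MiB
      dram &= ~(alignment - 1);              // align_down: still 6 MiB
      size_t nvdimm = size - dram;           // 4 MiB
      nvdimm = (nvdimm + page_sz) & ~page_sz;
      nvdimm &= ~(alignment - 1);            // still 4 MiB
      std::printf("dram=%zu nvdimm=%zu\n", dram, nvdimm);  // dram=6291456 nvdimm=4194304
      return 0;
    }
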
 660 
 661 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* heap_allocation_directory) : ReservedSpace() {
 662 
 663   if (size == 0) {
 664     return;
 665   }
 666 
  667   // If AllocateOldGenAt is set, create the backing file for the old gen on NVDIMM.
 668   if (AllocateOldGenAt != NULL) {
 669     _fd_for_nvdimm = os::create_file_for_heap(AllocateOldGenAt);
 670     if (_fd_for_nvdimm == -1) {
 671       vm_exit_during_initialization(
 672         err_msg("Could not create file for Heap at location %s", AllocateOldGenAt));
 673     }
 674     if (UseParallelOldGC) {
  675       // For ParallelOldGC, adaptive sizing picks the _old_gen virtual space size as needed.
  676       // Allocate Xmx on NVDIMM, as adaptive sizing may put a lot of pressure on it.
 677       os::allocate_file(_fd_for_nvdimm, MaxHeapSize);
 678       os::set_nvdimm_fd(_fd_for_nvdimm);
 679       os::set_nvdimm_present(true);
 680     }
 681   } else {
 682     _fd_for_nvdimm = -1;
 683   }
 684 
 685   if (heap_allocation_directory != NULL) {
 686     _fd_for_heap = os::create_file_for_heap(heap_allocation_directory);
 687     if (_fd_for_heap == -1) {
 688       vm_exit_during_initialization(
 689         err_msg("Could not create file for Heap at location %s", heap_allocation_directory));
 690     }
 691   }
 692 
 693   // Heap size should be aligned to alignment, too.
 694   guarantee(is_aligned(size, alignment), "set by caller");
 695 
 696   char* base_nv = NULL;
 697   _nvdimm_base_nv = NULL;
 698   
 699   if (_fd_for_nvdimm != -1 && UseG1GC) {
 700     if (!UseCompressedOops) {
  701       // When compressed oops are in use, the reservation is done in initialize_compressed_heap() below.
 702       initialize_g1gc_nvdimm_dram_sizes(size, alignment);
 703       base_nv = os::reserve_memory(size, NULL, alignment);
  704       _nvdimm_base_nv = base_nv + _nvdimm_size; // Hint for the allocation address of the DRAM heap.
 705     }
 706   }
 707 
 708   if (UseCompressedOops) {
 709     initialize_compressed_heap(size, alignment, large);
 710     if (_size > size) {
  711       // We allocated the heap with a noaccess prefix.
  712       // It can happen that we get a zero-based/unscaled heap with a noaccess
  713       // prefix if we had to try at an arbitrary address.
 714       establish_noaccess_prefix();
 715     }
 716   } else {
 717     if (_fd_for_nvdimm != -1 && UseG1GC) {
 718       initialize(_dram_size, alignment, large, NULL, false);
 719     } else {
 720       initialize(size, alignment, large, NULL, false);
 721     }
 722   }
 723 
 724   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
 725          "area must be distinguishable from marks for mark-sweep");
 726   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
 727          "area must be distinguishable from marks for mark-sweep");
 728 
 729   if (base() != NULL) {
 730     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
 731     if (_fd_for_nvdimm != -1 && UseG1GC) {
 732       os::set_nvdimm_present(true);
 733       os::set_dram_heapbase((address)_base);
 734       os::set_nvdimm_heapbase((address)_nvdimm_base);
 735       os::set_nvdimm_fd(_fd_for_nvdimm);
 736       _size += _nvdimm_size;
 737       _base = _nvdimm_base;
  738       log_info(gc, heap)("Java DRAM Heap at [" PTR_FORMAT " - " PTR_FORMAT "] & NVDIMM Old Gen at [" PTR_FORMAT " - " PTR_FORMAT "], size " SIZE_FORMAT, p2i(_nvdimm_base + _nvdimm_size), p2i(_nvdimm_base + _nvdimm_size + _dram_size), p2i(_nvdimm_base), p2i(_nvdimm_base + _nvdimm_size), size);
 739     }
 740   }
 741 
 742   if (_fd_for_heap != -1) {
 743     os::close(_fd_for_heap);
 744   }
 745 }
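
Note (editorial): when the NVDIMM path is taken, the constructor ends with _base pointing at the NVDIMM region and _size covering both regions. A tiny helper that prints the two sub-ranges the log line above reports (a sketch with illustrative stand-ins for the fields, not part of the source):

    #include <cstddef>
    #include <cstdio>

    // Layout: [nvdimm_base, +nvdimm_size) is the NVDIMM-backed old gen;
    // [nvdimm_base + nvdimm_size, +dram_size) is the DRAM-backed heap.
    static void print_layout(char* nvdimm_base, size_t nvdimm_size, size_t dram_size) {
      std::printf("NVDIMM old gen: [%p - %p)\n",
                  (void*)nvdimm_base, (void*)(nvdimm_base + nvdimm_size));
      std::printf("DRAM heap:      [%p - %p)\n",
                  (void*)(nvdimm_base + nvdimm_size),
                  (void*)(nvdimm_base + nvdimm_size + dram_size));
    }
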
 746 
  747 // Reserve space for code segment.  Same as Java heap, only we mark this as
 748 // executable.
 749 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
 750                                      size_t rs_align,
 751                                      bool large) :
 752   ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
 753   MemTracker::record_virtual_memory_type((address)base(), mtCode);
 754 }
 755 
 756 // VirtualSpace
 757 
 758 VirtualSpace::VirtualSpace() {
 759   _low_boundary           = NULL;
 760   _high_boundary          = NULL;
 761   _low                    = NULL;
 762   _high                   = NULL;




+++ new/src/hotspot/share/memory/virtualspace.cpp

  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "logging/log.hpp"
  27 #include "memory/resourceArea.hpp"
  28 #include "memory/virtualspace.hpp"
  29 #include "oops/markOop.hpp"
  30 #include "oops/oop.inline.hpp"
  31 #include "runtime/os.inline.hpp"
  32 #include "services/memTracker.hpp"
  33 #include "utilities/align.hpp"
  34 
  35 // ReservedSpace
  36 
  37 // Dummy constructor
  38 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0), 
  39     _nvdimm_base_nv(NULL), _nvdimm_size(0), _dram_size(0),
  40     _alignment(0), _special(false), _executable(false), _fd_for_heap(-1) {
  41 }
  42 
  43 ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1), 
  44     _nvdimm_base_nv(NULL), _nvdimm_size(0), _dram_size(0) {
  45   bool has_preferred_page_size = preferred_page_size != 0;
  46   // Want to use large pages where possible and pad with small pages.
  47   size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
  48   bool large_pages = page_size != (size_t)os::vm_page_size();
  49   size_t alignment;
  50   if (large_pages && has_preferred_page_size) {
  51     alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
  52     // ReservedSpace initialization requires size to be aligned to the given
  53     // alignment. Align the size up.
  54     size = align_up(size, alignment);
  55   } else {
  56     // Don't force the alignment to be large page aligned,
  57     // since that will waste memory.
  58     alignment = os::vm_allocation_granularity();
  59   }
  60   initialize(size, alignment, large_pages, NULL, false);
  61 }
  62 
  63 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  64                              bool large,
  65                              char* requested_address) : _fd_for_heap(-1), 
  66                              _nvdimm_base_nv(NULL), _nvdimm_size(0), _dram_size(0) {
  67   initialize(size, alignment, large, requested_address, false);
  68 }
  69 
  70 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  71                              bool large,
  72                              bool executable) : _fd_for_heap(-1), 
  73                              _nvdimm_base_nv(NULL), _nvdimm_size(0), _dram_size(0) {
  74   initialize(size, alignment, large, NULL, executable);
  75 }
  76 
  77 // Helper method
  78 static void unmap_or_release_memory(char* base, size_t size, bool is_file_mapped) {
  79   if (is_file_mapped) {
  80     if (!os::unmap_memory(base, size)) {
  81       fatal("os::unmap_memory failed");
  82     }
  83   } else if (!os::release_memory(base, size)) {
  84     fatal("os::release_memory failed");
  85   }
  86 }
  87 
  88 // Helper method.
  89 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
  90                                            const size_t size, bool special, bool is_file_mapped = false)
  91 {
  92   if (base == requested_address || requested_address == NULL)


 155 
 156     if (base != NULL) {
 157       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
 158         // OS ignored requested address. Try different address.
 159         return;
 160       }
 161       // Check alignment constraints.
 162       assert((uintptr_t) base % alignment == 0,
 163              "Large pages returned a non-aligned address, base: "
 164              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 165              p2i(base), alignment);
 166       _special = true;
 167     } else {
  168       // Failed; try to reserve regular memory below
 169       if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 170                             !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 171         log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 172       }
 173     }
 174   }
 175   int fd = -1;
 176   if (AllocateOldGenAt == NULL && _fd_for_heap != -1) {
 177     // AllocateHeapAt is in use.
 178     fd = _fd_for_heap;
 179   }
 180   if (base == NULL) {
  181     // Optimistically assume that the OS returns an aligned base pointer.
 182     // When reserving a large address range, most OSes seem to align to at
 183     // least 64K.
 184 
 185     // If the memory was requested at a particular address, use
  186     // os::attempt_reserve_memory_at() to avoid mapping over something
 187     // important.  If available space is not detected, return NULL.
 188 
 189     if (requested_address != 0) {
 190       base = os::attempt_reserve_memory_at(size, requested_address, fd);
 191       if (failed_to_reserve_as_requested(base, requested_address, size, false, fd != -1)) {
 192         // OS ignored requested address. Try different address.
 193         base = NULL;
 194       }
 195     } else {
 196       if (_nvdimm_base_nv != NULL && _fd_for_heap != -1) {
 197         base = os::reserve_memory(_dram_size, _nvdimm_base_nv, alignment, fd);
 198       } else {
 199         base = os::reserve_memory(size, NULL, alignment, fd);
 200       }
 201     }
 202 
 203     if (base == NULL) return;
 204 
 205     // Check alignment constraints
 206     if ((((size_t)base) & (alignment - 1)) != 0) {
 207       // Base not aligned, retry
 208       unmap_or_release_memory(base, size, fd != -1 /*is_file_mapped*/);
 209 
 210       // Make sure that size is aligned
 211       size = align_up(size, alignment);
 212       base = os::reserve_memory_aligned(size, alignment, fd);
 213 
 214       if (requested_address != 0 &&
 215           failed_to_reserve_as_requested(base, requested_address, size, false, fd != -1)) {
 216         // As a result of the alignment constraints, the allocated base differs
  217       // from the requested address. Return to the caller, who can
 218         // take remedial action (like try again without a requested address).
 219         assert(_base == NULL, "should be");
 220         return;
 221       }
 222     }
 223   }
 224   // Done
 225   _base = base;
  226   _nvdimm_base = _base - _nvdimm_size;
 227   _nvdimm_base_nv = NULL;
 228   _dram_size = (size_t)size;
 229   _size = size;
 230   _alignment = alignment;
  231   // If the heap is reserved with a backing file, the entire space has been committed, so set the _special flag to true.
 232   if (fd != -1) {
 233     _special = true;
 234   }
 235 }
 236 
 237 
 238 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
 239                              bool special, bool executable) {
 240   assert((size % os::vm_allocation_granularity()) == 0,
 241          "size not allocation aligned");
 242   _base = base;
 243   _size = size;
 244   _nvdimm_base = NULL; 
 245   _nvdimm_base_nv = NULL;
 246   _dram_size = (size_t)size;
 247   _alignment = alignment;
 248   _noaccess_prefix = 0;
 249   _special = special;
 250   _executable = executable;
 251 }
 252 


 279 size_t ReservedSpace::page_align_size_down(size_t size) {
 280   return align_down(size, os::vm_page_size());
 281 }
 282 
 283 
 284 size_t ReservedSpace::allocation_align_size_up(size_t size) {
 285   return align_up(size, os::vm_allocation_granularity());
 286 }
 287 
 288 
 289 size_t ReservedSpace::allocation_align_size_down(size_t size) {
 290   return align_down(size, os::vm_allocation_granularity());
 291 }
 292 
 293 
 294 void ReservedSpace::release() {
 295   if (is_reserved()) {
 296     char *real_base = _base - _noaccess_prefix;
 297     const size_t real_size = _size + _noaccess_prefix;
 298     // unmap nvdimm
 299     if (_nvdimm_base != NULL) {
 300       os::unmap_memory(_nvdimm_base, _nvdimm_size);
 301     }
 302     if (special()) {
 303       if (_fd_for_heap != -1) {
 304         os::unmap_memory(real_base, real_size);
 305       } else {
 306         os::release_memory_special(real_base, real_size);
 307       }
  308     } else {
 309       os::release_memory(real_base, real_size);
 310     }
 311     _base = NULL;
 312     _nvdimm_base = NULL;
 313     _nvdimm_base_nv = NULL;
 314     _dram_size = 0;
 315     _nvdimm_size = 0;
 316     _size = 0;
 317     _noaccess_prefix = 0;
 318     _alignment = 0;
 319     _special = false;
 320     _executable = false;


 350 
 351   _base += _noaccess_prefix;
 352   _size -= _noaccess_prefix;
 353   assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
 354 }
 355 
 356 // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
 357 // Does not check whether the reserved memory actually is at requested_address, as the memory returned
 358 // might still fulfill the wishes of the caller.
  359 // Ensures the memory is aligned to 'alignment'.
  360 // NOTE: If ReservedHeapSpace already points to some reserved memory, it is freed first.
 361 void ReservedHeapSpace::try_reserve_heap(size_t size,
 362                                          size_t alignment,
 363                                          bool large,
 364                                          char* requested_address) {
 365   if (_base != NULL) {
 366     // We tried before, but we didn't like the address delivered.
 367     release();
 368   }
 369 
 370   if (_fd_for_heap != -1 && UseG1GC && AllocateOldGenAt != NULL) {
 371     char* base_nv = os::reserve_memory(size, requested_address, alignment);
 372     initialize_g1gc_nvdimm_dram_sizes(size, alignment);
  373     _nvdimm_base_nv = base_nv + _nvdimm_size; // Hint for the allocation address of the DRAM compressed heap.
 374   }
 375 
 376   
 377   // If OS doesn't support demand paging for large page memory, we need
 378   // to use reserve_memory_special() to reserve and pin the entire region.
 379   // If there is a backing file directory for this space then whether
 380   // large pages are allocated is up to the filesystem of the backing file.
 381   // So we ignore the UseLargePages flag in this case.
 382   bool special = large && !os::can_commit_large_page_memory();
 383   if (special && _fd_for_heap != -1) {
 384     special = false;
 385     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 386                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 387       log_debug(gc, heap)("Cannot allocate large pages for Java Heap when AllocateHeapAt option is set.");
 388     }
 389   }
 390   char* base = NULL;
 391   char* nvdimm_base = NULL;
 392 
 393   log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
 394                              " heap of size " SIZE_FORMAT_HEX,
 395                              p2i(requested_address),
 396                              size);


 407       _special = true;
 408     }
 409   }
 410 
 411   if (base == NULL) {
 412     // Failed; try to reserve regular memory below
 413     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 414                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 415       log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 416     }
 417 
  418     // Optimistically assume that the OS returns an aligned base pointer.
 419     // When reserving a large address range, most OSes seem to align to at
 420     // least 64K.
 421 
 422     // If the memory was requested at a particular address, use
  423     // os::attempt_reserve_memory_at() to avoid mapping over something
 424     // important.  If available space is not detected, return NULL.
 425 
 426     if (requested_address != 0) {
 427       if (_nvdimm_base_nv != NULL && _fd_for_heap != -1 && AllocateOldGenAt != NULL) {
  428         // First unmap so that the OS does not keep trying.
 429         os::unmap_memory(_nvdimm_base_nv, _dram_size);
 430         base = os::attempt_reserve_memory_at(_dram_size, _nvdimm_base_nv);
 431       } else {
 432         base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
 433       }
 434     } else {
 435       if (_nvdimm_base_nv != NULL && _fd_for_heap != -1 && AllocateOldGenAt != NULL) {
  436         // First unmap so that the OS does not keep trying.
 437         os::unmap_memory(_nvdimm_base_nv, _dram_size);
 438         base = os::reserve_memory(_dram_size, _nvdimm_base_nv, alignment);
 439       } else {
 440         base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
 441       }
 442     }
 443   }
 444   if (base == NULL) { return; }
 445 
 446   // Done
 447   _base = base;
  448   _nvdimm_base = _base - _nvdimm_size;
 449   if (_nvdimm_base_nv != NULL && _fd_for_heap != -1 && AllocateOldGenAt != NULL) {
 450     _size = _dram_size;
 451   } else {
 452     _size = size;
 453   }
 454   _alignment = alignment;
 455 
  456   // If the heap is reserved with a backing file, the entire space has been committed, so set the _special flag to true.
 457   if (_fd_for_heap != -1 && AllocateOldGenAt == NULL) {
 458     _special = true;
 459   }
 460 
 461   // Check alignment constraints
 462   if ((((size_t)base) & (alignment - 1)) != 0) {
 463     // Base not aligned, retry.
 464     release();
 465   }
 466 }
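
Note (editorial): the NVDIMM flow above amounts to "reserve the full range to discover a workable address, compute the DRAM/NVDIMM split, unmap the DRAM sub-range, then re-reserve it at the hinted offset". A condensed standalone sketch of that sequence (POSIX, illustrative names; the real code routes through the os:: layer and may retry at other addresses):

    #include <stddef.h>
    #include <sys/mman.h>

    static char* reserve_dram_at_hint(size_t nvdimm_size, size_t dram_size) {
      // 1. Probe-reserve the whole range to find an address the OS will grant.
      char* whole = (char*)mmap(NULL, nvdimm_size + dram_size, PROT_NONE,
                                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (whole == MAP_FAILED) return NULL;
      char* dram_hint = whole + nvdimm_size;
      // 2. Unmap just the DRAM sub-range so it can be re-reserved in place.
      munmap(dram_hint, dram_size);
      // 3. Re-reserve at the hint; without MAP_FIXED the OS may still return a
      //    different address, which the caller has to detect and handle.
      char* dram = (char*)mmap(dram_hint, dram_size, PROT_NONE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      return dram == MAP_FAILED ? NULL : dram;
    }
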
 467 
 468 void ReservedHeapSpace::try_reserve_range(char *highest_start,
 469                                           char *lowest_start,
 470                                           size_t attach_point_alignment,
 471                                           char *aligned_heap_base_min_address,
 472                                           char *upper_bound,
 473                                           size_t size,
 474                                           size_t alignment,
 475                                           bool large) {
 476   const size_t attach_range = highest_start - lowest_start;
 477   // Cap num_attempts at possible number.


 654 
 655 void ReservedHeapSpace::initialize_g1gc_nvdimm_dram_sizes(size_t size, size_t alignment) {
 656   _dram_size = (size_t)((size * G1MaxNewSizePercent)/100);
  657   size_t page_sz = os::vm_page_size() - 1;
 658   _dram_size = (_dram_size + page_sz) & (~page_sz);
 659   // align sizes.
 660   _dram_size = align_down(_dram_size, alignment);
 661   _nvdimm_size = size - _dram_size;
 662   _nvdimm_size = (_nvdimm_size + page_sz) & (~page_sz);
 663   _nvdimm_size = align_down(_nvdimm_size, alignment);
 664 }
 665 
 666 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* heap_allocation_directory) : ReservedSpace() {
 667 
 668   if (size == 0) {
 669     return;
 670   }
 671 
  672   // If AllocateOldGenAt is set, create the backing file for the old gen on NVDIMM.
 673   if (AllocateOldGenAt != NULL) {
 674     _fd_for_heap = os::create_file_for_heap(AllocateOldGenAt);
  675     if (_fd_for_heap == -1) {
 676       vm_exit_during_initialization(
 677         err_msg("Could not create file for Heap at location %s", AllocateOldGenAt));
 678     }
 679     if (UseParallelOldGC) {
  680       // For ParallelOldGC, adaptive sizing picks the _old_gen virtual space size as needed.
  681       // Allocate Xmx on NVDIMM, as adaptive sizing may put a lot of pressure on it.
 682       os::allocate_file(_fd_for_heap, MaxHeapSize);
 683       os::set_nvdimm_fd(_fd_for_heap);
 684       os::set_nvdimm_present(true);
 685     }
 686   } else {
 687     _fd_for_heap = -1;
 688   }
 689 
 690   if (heap_allocation_directory != NULL) {
 691     _fd_for_heap = os::create_file_for_heap(heap_allocation_directory);
 692     if (_fd_for_heap == -1) {
 693       vm_exit_during_initialization(
 694         err_msg("Could not create file for Heap at location %s", heap_allocation_directory));
 695     }
 696   }
 697 
 698   // Heap size should be aligned to alignment, too.
 699   guarantee(is_aligned(size, alignment), "set by caller");
 700 
 701   char* base_nv = NULL;
 702   _nvdimm_base_nv = NULL;
 703   
 704   if (_fd_for_heap != -1 && UseG1GC && AllocateOldGenAt != NULL) {
 705     if (!UseCompressedOops) {
  706       // When compressed oops are in use, the reservation is done in initialize_compressed_heap() below.
 707       initialize_g1gc_nvdimm_dram_sizes(size, alignment);
 708       base_nv = os::reserve_memory(size, NULL, alignment);
  709       _nvdimm_base_nv = base_nv + _nvdimm_size; // Hint for the allocation address of the DRAM heap.
 710     }
 711   }
 712 
 713   if (UseCompressedOops) {
 714     initialize_compressed_heap(size, alignment, large);
 715     if (_size > size) {
  716       // We allocated the heap with a noaccess prefix.
  717       // It can happen that we get a zero-based/unscaled heap with a noaccess
  718       // prefix if we had to try at an arbitrary address.
 719       establish_noaccess_prefix();
 720     }
 721   } else {
 722     if (_fd_for_heap != -1 && UseG1GC && AllocateOldGenAt != NULL) {
 723       initialize(_dram_size, alignment, large, NULL, false);
 724     } else {
 725       initialize(size, alignment, large, NULL, false);
 726     }
 727   }
 728 
 729   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
 730          "area must be distinguishable from marks for mark-sweep");
 731   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
 732          "area must be distinguishable from marks for mark-sweep");
 733 
 734   if (base() != NULL) {
 735     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
 736     if (_fd_for_heap != -1 && UseG1GC && AllocateOldGenAt != NULL) {
 737       os::set_nvdimm_present(true);
 738       os::set_dram_heapbase((address)_base);
 739       os::set_nvdimm_heapbase((address)_nvdimm_base);
 740       os::set_nvdimm_fd(_fd_for_heap);
 741       _size += _nvdimm_size;
 742       _base = _nvdimm_base;
  743       log_info(gc, heap)("Java DRAM Heap at [" PTR_FORMAT " - " PTR_FORMAT "] & NVDIMM Old Gen at [" PTR_FORMAT " - " PTR_FORMAT "], size " SIZE_FORMAT, p2i(_nvdimm_base + _nvdimm_size), p2i(_nvdimm_base + _nvdimm_size + _dram_size), p2i(_nvdimm_base), p2i(_nvdimm_base + _nvdimm_size), size);
 744     }
 745   }
 746 
 747   if (_fd_for_heap != -1 && AllocateOldGenAt == NULL) {
 748     os::close(_fd_for_heap);
 749   }
 750 }
 751 
  752 // Reserve space for code segment.  Same as Java heap, only we mark this as
 753 // executable.
 754 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
 755                                      size_t rs_align,
 756                                      bool large) :
 757   ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
 758   MemTracker::record_virtual_memory_type((address)base(), mtCode);
 759 }
 760 
 761 // VirtualSpace
 762 
 763 VirtualSpace::VirtualSpace() {
 764   _low_boundary           = NULL;
 765   _high_boundary          = NULL;
 766   _low                    = NULL;
 767   _high                   = NULL;

