--- old/os/posix/os_posix.cpp 2018-06-12 04:14:30.167190669 -0700 +++ new/os/posix/os_posix.cpp 2018-06-12 04:14:30.119190668 -0700 @@ -238,24 +238,37 @@ #endif } +// Allocates space for the file on given device +int os::allocate_file(int file_desc, size_t size) { + // allocate space for the file + if (file_desc == -1) { + vm_exit_during_initialization(err_msg("Invalid file descriptor passed (%d)", file_desc)); + } + int ret = util_posix_fallocate(file_desc, 0, (off_t)size); + if (ret != 0) { + vm_exit_during_initialization(err_msg("Could not allocate file to map Java heap on given filesystem... error(%d)", ret)); + } + return ret; +} + // Map the given address range to the provided file descriptor. -char* os::map_memory_to_file(char* base, size_t size, int fd) { +char* os::map_memory_to_file(char* base, size_t size, int fd, int offset, bool exec, bool allocate) { assert(fd != -1, "File descriptor is not valid"); // allocate space for the file - int ret = util_posix_fallocate(fd, 0, (off_t)size); - if (ret != 0) { - vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory. error(%d)", ret)); - return NULL; + if (allocate == true) { + if (os::allocate_file(fd, size) != 0) { + return NULL; + } } - int prot = PROT_READ | PROT_WRITE; + int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE; int flags = MAP_SHARED; if (base != NULL) { flags |= MAP_FIXED; } - char* addr = (char*)mmap(base, size, prot, flags, fd, 0); + char* addr = (char*)mmap(base, size, prot, flags, fd, offset); if (addr == MAP_FAILED) { warning("Failed mmap to file. 
(%s)", os::strerror(errno)); return NULL; --- old/os/windows/os_windows.cpp 2018-06-12 04:14:30.723190685 -0700 +++ new/os/windows/os_windows.cpp 2018-06-12 04:14:30.671190683 -0700 @@ -2984,7 +2984,7 @@ } // If 'base' is not NULL, function will return NULL if it cannot get 'base' -char* os::map_memory_to_file(char* base, size_t size, int fd) { +char* os::map_memory_to_file(char* base, size_t size, int fd, int offset, bool exec, bool allocate) { assert(fd != -1, "File descriptor is not valid"); HANDLE fh = (HANDLE)_get_osfhandle(fd); --- old/share/gc/cms/cmsArguments.cpp 2018-06-12 04:14:31.215190698 -0700 +++ new/share/gc/cms/cmsArguments.cpp 2018-06-12 04:14:31.159190697 -0700 @@ -86,6 +86,10 @@ assert(!UseSerialGC && !UseParallelOldGC && !UseParallelGC, "Error"); assert(UseConcMarkSweepGC, "CMS is expected to be on here"); + if (AllocateOldGenAt != NULL) { + vm_exit_during_initialization("The flag -XX:AllocateOldGenAt can not be used with CMS. Only ParallelOldGC and G1GC are supported.", NULL); + } + // CMS space iteration, which FLSVerifyAllHeapreferences entails, // insists that we hold the requisite locks so that the iteration is // MT-safe. For the verification at start-up and shut-down, we don't --- old/share/gc/g1/g1CollectedHeap.cpp 2018-06-12 04:14:31.739190713 -0700 +++ new/share/gc/g1/g1CollectedHeap.cpp 2018-06-12 04:14:31.703190712 -0700 @@ -1316,8 +1316,7 @@ bool G1CollectedHeap::expand(size_t expand_bytes, WorkGang* pretouch_workers, double* expand_time_ms) { size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes); - aligned_expand_bytes = align_up(aligned_expand_bytes, - HeapRegion::GrainBytes); + aligned_expand_bytes = align_up(aligned_expand_bytes, HeapRegion::GrainBytes); log_debug(gc, ergo, heap)("Expand the heap. 
requested expansion amount: " SIZE_FORMAT "B expansion amount: " SIZE_FORMAT "B", expand_bytes, aligned_expand_bytes); @@ -1563,7 +1562,7 @@ _hot_card_cache = new G1HotCardCache(this); // Carve out the G1 part of the heap. - ReservedSpace g1_rs = heap_rs.first_part(max_byte_size); + ReservedSpace g1_rs = heap_rs.first_part(heap_rs.size()); size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size(); G1RegionToSpaceMapper* heap_storage = G1RegionToSpaceMapper::create_mapper(g1_rs, @@ -1646,12 +1645,23 @@ } _cm_thread = _cm->cm_thread(); + // Expand NVDIMM to maximum old gen size. + size_t aligned_expand_bytes = 0; + if (os::has_nvdimm()) { + aligned_expand_bytes = expand_old_gen_on_nvdimm(max_byte_size); + } // Now expand into the initial heap size. if (!expand(init_byte_size, _workers)) { vm_shutdown_during_initialization("Failed to allocate initial heap."); return JNI_ENOMEM; } + if (os::has_nvdimm()) { + // Show how much memory was committed on NVDIMM and DRAM. + log_info(gc, heap)("NVDIMM Reserved Bytes : %ld DRAM Reserved Bytes : %ld \n", aligned_expand_bytes, init_byte_size); + log_info(gc, heap)("When NVDIMM is present, we always reserve and commit Maximum OldGen Size on NVDIMM"); + log_info(gc, heap)("JVM will have more size reserved and committed than specified by Xmn or Xms options (but never more than Xmx)."); + } // Perform any initialization actions delegated to the policy. 
g1_policy()->init(this, &_collection_set); @@ -1713,6 +1723,16 @@ return JNI_OK; } +size_t G1CollectedHeap::expand_old_gen_on_nvdimm(size_t max_byte_size) { + size_t nvdimm_bytes = max_byte_size - (size_t)(max_byte_size * G1MaxNewSizePercent)/100; + size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(nvdimm_bytes); + aligned_expand_bytes = align_up(aligned_expand_bytes, HeapRegion::GrainBytes); + uint nvdimm_regions = (uint)(aligned_expand_bytes/HeapRegion::GrainBytes); + os::set_nvdimm_regionlength(nvdimm_regions); + expand(aligned_expand_bytes, _workers); + return aligned_expand_bytes; +} + void G1CollectedHeap::initialize_serviceability() { _eden_pool = new G1EdenPool(this); _survivor_pool = new G1SurvivorPool(this); --- old/share/gc/g1/g1CollectedHeap.hpp 2018-06-12 04:14:32.155190724 -0700 +++ new/share/gc/g1/g1CollectedHeap.hpp 2018-06-12 04:14:32.127190724 -0700 @@ -1386,6 +1386,7 @@ private: void print_heap_regions() const; void print_regions_on(outputStream* st) const; + size_t expand_old_gen_on_nvdimm(size_t max_byte_size); public: virtual void print_on(outputStream* st) const; --- old/share/gc/g1/g1PageBasedVirtualSpace.cpp 2018-06-12 04:14:32.499190734 -0700 +++ new/share/gc/g1/g1PageBasedVirtualSpace.cpp 2018-06-12 04:14:32.459190733 -0700 @@ -134,6 +134,20 @@ char* start_addr = page_start(start); size_t size = num_pages * _page_size; + if (((address)start_addr == (address)os::nvdimm_heapbase())) { + // first remove my dummy mapping. 
+ if (os::unmap_memory(start_addr, size)) { + char* nvdimm_addr = os::attempt_reserve_memory_at(size, start_addr, os::nvdimm_fd()); + if (nvdimm_addr != start_addr) { + vm_exit_during_initialization( + err_msg("Could not map memory at %p for NVDIMM %s Fd %d", nvdimm_addr, AllocateOldGenAt, os::nvdimm_fd())); + } else { + log_info(gc, heap)("NVDIMM Memory successfully mapped at %p, Size %lu", start_addr, size); + os::close(os::nvdimm_fd()); + } + } + return; + } os::commit_memory_or_exit(start_addr, size, _page_size, _executable, err_msg("Failed to commit area from " PTR_FORMAT " to " PTR_FORMAT " of length " SIZE_FORMAT ".", p2i(start_addr), p2i(start_addr + size), size)); --- old/share/gc/g1/heapRegionManager.hpp 2018-06-12 04:14:32.915190745 -0700 +++ new/share/gc/g1/heapRegionManager.hpp 2018-06-12 04:14:32.859190744 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -168,8 +168,14 @@ } HeapRegion* allocate_free_region(bool is_old) { - HeapRegion* hr = _free_list.remove_region(is_old); - + HeapRegion* hr = NULL; + if (os::has_nvdimm() && !is_old) { + hr = _free_list.show_tail(); + if ((address)(hr->top()) < os::dram_heapbase()) { + return NULL; + } + } + hr = _free_list.remove_region(is_old); if (hr != NULL) { assert(hr->next() == NULL, "Single region should not have next"); assert(is_available(hr->hrm_index()), "Must be committed"); --- old/share/gc/g1/heapRegionSet.hpp 2018-06-12 04:14:33.403190759 -0700 +++ new/share/gc/g1/heapRegionSet.hpp 2018-06-12 04:14:33.359190758 -0700 @@ -192,7 +192,7 @@ // Assumes that the list is ordered and will preserve that order. The order // is determined by hrm_index. 
inline void add_ordered(HeapRegion* hr); - + inline HeapRegion* show_tail() { return _tail; } // Removes from head or tail based on the given argument. HeapRegion* remove_region(bool from_head); --- old/share/gc/parallel/adjoiningGenerations.cpp 2018-06-12 04:14:33.843190771 -0700 +++ new/share/gc/parallel/adjoiningGenerations.cpp 2018-06-12 04:14:33.815190770 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -60,6 +60,15 @@ // a virtual to each generation for initialization of the // generation. + if (os::has_nvdimm()) { + _virtual_spaces.setup_fd(old_young_rs.nvdimm_fd()); +#if defined(_WINDOWS) + log_info(gc, heap)("On Windows, ParallelOldGC with NVDIMM does not support adaptive sizing of OldGen. "); + init_low_byte_size = policy->max_old_size(); + min_low_byte_size = policy->max_old_size(); + max_low_byte_size = policy->max_old_size(); +#endif + } // Does the actual creation of the virtual spaces _virtual_spaces.initialize(max_low_byte_size, init_low_byte_size, @@ -77,7 +86,10 @@ min_low_byte_size, _virtual_spaces.low_byte_size_limit(), "old", 1); - + + if (os::has_nvdimm()) { + _old_gen->setup_fd(old_young_rs.nvdimm_fd()); + } young_gen()->initialize_work(); assert(young_gen()->reserved().byte_size() <= young_gen()->gen_size_limit(), "Consistency check"); @@ -112,6 +124,10 @@ _young_gen->initialize(young_rs, alignment); assert(young_gen()->gen_size_limit() == young_rs.size(), "Consistency check"); + if (os::has_nvdimm()) { + _old_gen->setup_fd(old_young_rs.nvdimm_fd()); + old_rs.setup_fd(old_young_rs.nvdimm_fd()); + } _old_gen->initialize(old_rs, alignment, "old", 1); assert(old_gen()->gen_size_limit() == old_rs.size(), "Consistency check"); } --- 
old/share/gc/parallel/adjoiningVirtualSpaces.cpp 2018-06-12 04:14:34.215190781 -0700 +++ new/share/gc/parallel/adjoiningVirtualSpaces.cpp 2018-06-12 04:14:34.163190780 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -46,9 +46,16 @@ ReservedSpace young_rs = _reserved_space.last_part(max_low_byte_size); _low = new PSVirtualSpace(old_rs, alignment()); - if (!_low->expand_by(init_low_byte_size)) { - vm_exit_during_initialization("Could not reserve enough space for " - "object heap"); + if (os::has_nvdimm() && UseParallelOldGC) { + if (!_low->expand_by(init_low_byte_size, _nvdimm_fd)) { + vm_exit_during_initialization("Could not reserve enough space for " + "object heap"); + } + } else { + if (!_low->expand_by(init_low_byte_size)) { + vm_exit_during_initialization("Could not reserve enough space for " + "object heap"); + } } _high = new PSVirtualSpaceHighToLow(young_rs, alignment()); @@ -60,7 +67,12 @@ bool AdjoiningVirtualSpaces::adjust_boundary_up(size_t change_in_bytes) { assert(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary, "runtime check"); - size_t actual_change = low()->expand_into(high(), change_in_bytes); + size_t actual_change = 0; + if (os::has_nvdimm() && UseParallelOldGC) { + actual_change = low()->expand_into(high(), change_in_bytes, nvdimm_fd()); + } else { + actual_change = low()->expand_into(high(), change_in_bytes); + } return actual_change != 0; } --- old/share/gc/parallel/adjoiningVirtualSpaces.hpp 2018-06-12 04:14:34.635190793 -0700 +++ new/share/gc/parallel/adjoiningVirtualSpaces.hpp 2018-06-12 04:14:34.595190792 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -75,6 +75,8 @@ const size_t _alignment; + int _nvdimm_fd; + public: // Allocates two virtual spaces that will be located at the // high and low ends. Does no initialization. @@ -110,6 +112,8 @@ void initialize(size_t max_low_byte_size, size_t init_low_byte_size, size_t init_high_byte_size); + void setup_fd(int fd) {_nvdimm_fd = fd; } + int nvdimm_fd() const {return _nvdimm_fd; } }; #endif // SHARE_VM_GC_PARALLEL_ADJOININGVIRTUALSPACES_HPP --- old/share/gc/parallel/psOldGen.cpp 2018-06-12 04:14:35.239190810 -0700 +++ new/share/gc/parallel/psOldGen.cpp 2018-06-12 04:14:35.187190808 -0700 @@ -72,9 +72,24 @@ void PSOldGen::initialize_virtual_space(ReservedSpace rs, size_t alignment) { _virtual_space = new PSVirtualSpace(rs, alignment); - if (!_virtual_space->expand_by(_init_gen_size)) { - vm_exit_during_initialization("Could not reserve enough space for " - "object heap"); + if (os::has_nvdimm() && UseParallelOldGC) { + if (!_virtual_space->expand_by(_init_gen_size, _nvdimm_fd)) { + vm_exit_during_initialization("Could not reserve enough space for " + "object heap"); + } +#if defined (_WINDOWS) + // Windows OS does not support incremental mapping for DAX (NVDIMM) File System + if (os::has_nvdimm()) { + os::close(os::nvdimm_fd()); + } +#endif + os::set_nvdimm_heapbase((address)(_virtual_space->reserved_low_addr())); + os::set_dram_heapbase((address)((char*)_virtual_space->reserved_low_addr() + _max_gen_size)); + } else { + if (!_virtual_space->expand_by(_init_gen_size)) { + vm_exit_during_initialization("Could not reserve enough space for " + "object heap"); + } } } @@ -275,7 +290,12 @@ if (bytes == 0) { return true; // That's what virtual_space()->expand_by(0) would return } - bool result = virtual_space()->expand_by(bytes); + bool result = false; + if 
(os::has_nvdimm() && UseParallelOldGC) { + result = virtual_space()->expand_by(bytes, nvdimm_fd()); + } else { + result = virtual_space()->expand_by(bytes); + } if (result) { if (ZapUnusedHeapArea) { // We need to mangle the newly expanded area. The memregion spans --- old/share/gc/parallel/psOldGen.hpp 2018-06-12 04:14:35.583190819 -0700 +++ new/share/gc/parallel/psOldGen.hpp 2018-06-12 04:14:35.543190818 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -59,6 +59,9 @@ const size_t _min_gen_size; const size_t _max_gen_size; + // NVDIMM handle to expand/shrink old gen on NVDIMM. + int _nvdimm_fd; + // Used when initializing the _name field. static inline const char* select_name(); @@ -137,7 +140,7 @@ MemRegion reserved() const { return _reserved; } virtual size_t max_gen_size() { return _max_gen_size; } size_t min_gen_size() { return _min_gen_size; } - + int nvdimm_fd() { return _nvdimm_fd; } // Returns limit on the maximum size of the generation. This // is the same as _max_gen_size for PSOldGen but need not be // for a derived class. @@ -218,6 +221,8 @@ // Printing support virtual const char* name() const { return _name; } + // set up fd for NVDIMM + void setup_fd(int fd) {_nvdimm_fd = fd; } // Debugging support // Save the tops of all spaces for later use during mangling. void record_spaces_top() PRODUCT_RETURN; --- old/share/gc/parallel/psVirtualspace.cpp 2018-06-12 04:14:35.939190829 -0700 +++ new/share/gc/parallel/psVirtualspace.cpp 2018-06-12 04:14:35.907190828 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -98,6 +98,21 @@ return result; } +bool PSVirtualSpace::expand_by(size_t bytes, int fd) { + assert(is_aligned(bytes), "arg not aligned"); + DEBUG_ONLY(PSVirtualSpaceVerifier this_verifier(this)); + if (uncommitted_size() < bytes) { + return false; + } + char* const base_addr = committed_high_addr(); + bool commit_result = os::commit_memory(base_addr, bytes, alignment(), !ExecMem, fd, committed_size()); + bool result = special() || commit_result; + if (result) { + _committed_high_addr += bytes; + } + return result; +} + bool PSVirtualSpace::shrink_by(size_t bytes) { assert(is_aligned(bytes), "arg not aligned"); DEBUG_ONLY(PSVirtualSpaceVerifier this_verifier(this)); @@ -176,6 +191,66 @@ return bytes; } +size_t +PSVirtualSpace::expand_into(PSVirtualSpace* other_space, size_t bytes, int fd) { + assert(is_aligned(bytes), "arg not aligned"); + assert(grows_up(), "this space must grow up"); + assert(other_space->grows_down(), "other space must grow down"); + assert(reserved_high_addr() == other_space->reserved_low_addr(), + "spaces not contiguous"); + assert(special() == other_space->special(), "one space is special, the other is not"); + DEBUG_ONLY(PSVirtualSpaceVerifier this_verifier(this)); + DEBUG_ONLY(PSVirtualSpaceVerifier other_verifier(other_space)); + + size_t bytes_needed = bytes; + + // First use the uncommitted region in this space. + size_t tmp_bytes = MIN2(uncommitted_size(), bytes_needed); + if (tmp_bytes > 0) { + if (expand_by(tmp_bytes)) { + bytes_needed -= tmp_bytes; + } else { + return 0; + } + } + + // Next take from the uncommitted region in the other space, and commit it. 
+ tmp_bytes = MIN2(other_space->uncommitted_size(), bytes_needed); + if (tmp_bytes > 0) { + char* const commit_base = committed_high_addr(); + if (other_space->special() || + os::commit_memory(commit_base, tmp_bytes, alignment(), !ExecMem, fd, committed_size())) { + // Reduce the reserved region in the other space. + other_space->set_reserved(other_space->reserved_low_addr() + tmp_bytes, + other_space->reserved_high_addr(), + other_space->special()); + + // Grow both reserved and committed in this space. + _reserved_high_addr += tmp_bytes; + _committed_high_addr += tmp_bytes; + bytes_needed -= tmp_bytes; + } else { + return bytes - bytes_needed; + } + } + + // Finally take from the already committed region in the other space. + tmp_bytes = bytes_needed; + if (tmp_bytes > 0) { + // Reduce both committed and reserved in the other space. + other_space->set_committed(other_space->committed_low_addr() + tmp_bytes, + other_space->committed_high_addr()); + other_space->set_reserved(other_space->reserved_low_addr() + tmp_bytes, + other_space->reserved_high_addr(), + other_space->special()); + + // Grow both reserved and committed in this space. 
+ _reserved_high_addr += tmp_bytes; + _committed_high_addr += tmp_bytes; + } + return bytes; +} + #ifndef PRODUCT bool PSVirtualSpace::is_aligned(size_t value, size_t align) { const size_t tmp_value = value + align - 1; --- old/share/gc/parallel/psVirtualspace.hpp 2018-06-12 04:14:36.339190840 -0700 +++ new/share/gc/parallel/psVirtualspace.hpp 2018-06-12 04:14:36.295190839 -0700 @@ -91,6 +91,10 @@ virtual size_t expand_into(PSVirtualSpace* space, size_t bytes); void release(); + // NVDIMM versions to manage expansion/shrinking + bool expand_by(size_t bytes, int fd); + size_t expand_into(PSVirtualSpace* space, size_t bytes, int fd); + #ifndef PRODUCT // Debugging static bool is_aligned(size_t val, size_t align); --- old/share/gc/serial/serialArguments.cpp 2018-06-12 04:14:36.707190850 -0700 +++ new/share/gc/serial/serialArguments.cpp 2018-06-12 04:14:36.667190849 -0700 @@ -34,5 +34,8 @@ } CollectedHeap* SerialArguments::create_heap() { + if (AllocateOldGenAt != NULL) { + vm_exit_during_initialization("The flag -XX:AllocateOldGenAt can not be used with SerialGC. 
Only ParallelOldGC and G1GC are supported.", NULL); + } return create_heap_with_policy(); } --- old/share/memory/virtualspace.cpp 2018-06-12 04:14:37.071190860 -0700 +++ new/share/memory/virtualspace.cpp 2018-06-12 04:14:37.031190859 -0700 @@ -35,11 +35,13 @@ // ReservedSpace // Dummy constructor -ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0), - _alignment(0), _special(false), _executable(false), _fd_for_heap(-1) { +ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0), + _nvdimm_base_nv(NULL), _nvdimm_size(0), _dram_size(0), + _alignment(0), _special(false), _executable(false), _fd_for_heap(-1), _fd_for_nvdimm(-1) { } -ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1) { +ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1), _fd_for_nvdimm(-1), + _nvdimm_base_nv(NULL), _nvdimm_size(0), _dram_size(0) { bool has_preferred_page_size = preferred_page_size != 0; // Want to use large pages where possible and pad with small pages. size_t page_size = has_preferred_page_size ? 
preferred_page_size : os::page_size_for_region_unaligned(size, 1); @@ -60,13 +62,15 @@ ReservedSpace::ReservedSpace(size_t size, size_t alignment, bool large, - char* requested_address) : _fd_for_heap(-1) { + char* requested_address) : _fd_for_heap(-1), _fd_for_nvdimm(-1), + _nvdimm_base_nv(NULL), _nvdimm_size(0), _dram_size(0) { initialize(size, alignment, large, requested_address, false); } ReservedSpace::ReservedSpace(size_t size, size_t alignment, bool large, - bool executable) : _fd_for_heap(-1) { + bool executable) : _fd_for_heap(-1), _fd_for_nvdimm(-1), + _nvdimm_base_nv(NULL), _nvdimm_size(0), _dram_size(0) { initialize(size, alignment, large, NULL, executable); } @@ -143,6 +147,7 @@ } char* base = NULL; + char* nvdimm_base = NULL; if (special) { @@ -184,7 +189,11 @@ base = NULL; } } else { - base = os::reserve_memory(size, NULL, alignment, _fd_for_heap); + if (_nvdimm_base_nv != NULL && _fd_for_nvdimm != -1) { + base = os::reserve_memory(_dram_size, _nvdimm_base_nv, alignment, _fd_for_heap); + } else { + base = os::reserve_memory(size, NULL, alignment, _fd_for_heap); + } } if (base == NULL) return; @@ -210,6 +219,9 @@ } // Done _base = base; + _nvdimm_base = _base-_nvdimm_size; + _nvdimm_base_nv = NULL; + _dram_size = (size_t)size; _size = size; _alignment = alignment; // If heap is reserved with a backing file, the entire space has been committed. 
So set the _special flag to true @@ -225,13 +237,15 @@ "size not allocation aligned"); _base = base; _size = size; + _nvdimm_base = NULL; + _nvdimm_base_nv = NULL; + _dram_size = (size_t)size; _alignment = alignment; _noaccess_prefix = 0; _special = special; _executable = executable; } - ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment, bool split, bool realloc) { assert(partition_size <= size(), "partition failed"); @@ -277,6 +291,10 @@ if (is_reserved()) { char *real_base = _base - _noaccess_prefix; const size_t real_size = _size + _noaccess_prefix; + // unmap nvdimm + if (_fd_for_nvdimm != -1) { + os::unmap_memory(real_base+real_size, _nvdimm_size); + } if (special()) { if (_fd_for_heap != -1) { os::unmap_memory(real_base, real_size); @@ -287,6 +305,10 @@ os::release_memory(real_base, real_size); } _base = NULL; + _nvdimm_base = NULL; + _nvdimm_base_nv = NULL; + _dram_size = 0; + _nvdimm_size = 0; _size = 0; _noaccess_prefix = 0; _alignment = 0; @@ -341,6 +363,12 @@ release(); } + if (_fd_for_nvdimm != -1 && UseG1GC) { + char* base_nv = os::reserve_memory(size, requested_address, alignment); + initialize_g1gc_nvdimm_dram_sizes(size, alignment); + _nvdimm_base_nv = base_nv+_nvdimm_size; // hint for allocation address of DRAM COMPRESSED HEAP. + } + // If OS doesn't support demand paging for large page memory, we need // to use reserve_memory_special() to reserve and pin the entire region. // If there is a backing file directory for this space then whether @@ -355,6 +383,7 @@ } } char* base = NULL; + char* nvdimm_base = NULL; log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT " heap of size " SIZE_FORMAT_HEX, @@ -390,16 +419,33 @@ // important. If available space is not detected, return NULL. if (requested_address != 0) { - base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap); + if (_nvdimm_base_nv != NULL && _fd_for_nvdimm != -1) { + // first unmap so that OS does not keep trying. 
+ os::unmap_memory(_nvdimm_base_nv, _dram_size); + base = os::attempt_reserve_memory_at(_dram_size, _nvdimm_base_nv); + } else { + base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap); + } } else { - base = os::reserve_memory(size, NULL, alignment, _fd_for_heap); + if (_nvdimm_base_nv != NULL && _fd_for_nvdimm != -1) { + // first unmap so that OS does not keep trying. + os::unmap_memory(_nvdimm_base_nv, _dram_size); + base = os::reserve_memory(_dram_size, _nvdimm_base_nv, alignment); + } else { + base = os::reserve_memory(size, NULL, alignment, _fd_for_heap); + } } } if (base == NULL) { return; } // Done _base = base; - _size = size; + _nvdimm_base = _base-_nvdimm_size; + if (_nvdimm_base_nv != NULL && _fd_for_nvdimm != -1) { + _size = _dram_size; + } else { + _size = size; + } _alignment = alignment; // If heap is reserved with a backing file, the entire space has been committed. So set the _special flag to true @@ -601,12 +647,41 @@ } } +void ReservedHeapSpace::initialize_g1gc_nvdimm_dram_sizes(size_t size, size_t alignment) { + _dram_size = (size_t)((size * G1MaxNewSizePercent)/100); + size_t page_sz = os::vm_page_size() -1 ; + _dram_size = (_dram_size + page_sz) & (~page_sz); + // align sizes. + _dram_size = align_down(_dram_size, alignment); + _nvdimm_size = size - _dram_size; + _nvdimm_size = (_nvdimm_size + page_sz) & (~page_sz); + _nvdimm_size = align_down(_nvdimm_size, alignment); +} + ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* heap_allocation_directory) : ReservedSpace() { if (size == 0) { return; } + // if AllocateOldGen is used + if (AllocateOldGenAt != NULL) { + _fd_for_nvdimm = os::create_file_for_heap(AllocateOldGenAt); + if (_fd_for_nvdimm == -1) { + vm_exit_during_initialization( + err_msg("Could not create file for Heap at location %s", AllocateOldGenAt)); + } + if (UseParallelOldGC) { + // For ParallelOldGC, adaptive sizing picks _old_gen virtual space sizes as needed. 
+ // allocate Xmx on NVDIMM as adaptive sizing may put lot of pressure on NVDIMM. + os::allocate_file(_fd_for_nvdimm, MaxHeapSize); + os::set_nvdimm_fd(_fd_for_nvdimm); + os::set_nvdimm_present(true); + } + } else { + _fd_for_nvdimm = -1; + } + if (heap_allocation_directory != NULL) { _fd_for_heap = os::create_file_for_heap(heap_allocation_directory); if (_fd_for_heap == -1) { @@ -618,6 +693,18 @@ // Heap size should be aligned to alignment, too. guarantee(is_aligned(size, alignment), "set by caller"); + char* base_nv = NULL; + _nvdimm_base_nv = NULL; + + if (_fd_for_nvdimm != -1 && UseG1GC) { + if (!UseCompressedOops) { + // if compressed oops use requested address. + initialize_g1gc_nvdimm_dram_sizes(size, alignment); + base_nv = os::reserve_memory(size, NULL, alignment); + _nvdimm_base_nv = base_nv+_nvdimm_size; // hint for allocation address of DRAM heap + } + } + if (UseCompressedOops) { initialize_compressed_heap(size, alignment, large); if (_size > size) { @@ -627,7 +714,11 @@ establish_noaccess_prefix(); } } else { - initialize(size, alignment, large, NULL, false); + if (_fd_for_nvdimm != -1 && UseG1GC) { + initialize(_dram_size, alignment, large, NULL, false); + } else { + initialize(size, alignment, large, NULL, false); + } } assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base, @@ -637,6 +728,15 @@ if (base() != NULL) { MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap); + if (_fd_for_nvdimm != -1 && UseG1GC) { + os::set_nvdimm_present(true); + os::set_dram_heapbase((address)_base); + os::set_nvdimm_heapbase((address)_nvdimm_base); + os::set_nvdimm_fd(_fd_for_nvdimm); + _size += _nvdimm_size; + _base = _nvdimm_base; + log_info(gc, heap)("Java DRAM Heap at [%p - %p] & NVDIMM Old Gen at [%p - %p] %ld \n", _nvdimm_base+_nvdimm_size, (char*)(_nvdimm_base+_nvdimm_size+_dram_size), _nvdimm_base, (char*)(_nvdimm_base+_nvdimm_size), size); + } } if (_fd_for_heap != -1) { --- old/share/memory/virtualspace.hpp 2018-06-12 
04:14:37.459190871 -0700 +++ new/share/memory/virtualspace.hpp 2018-06-12 04:14:37.423190870 -0700 @@ -33,11 +33,16 @@ friend class VMStructs; protected: char* _base; + char* _nvdimm_base; + char* _nvdimm_base_nv; size_t _size; + size_t _dram_size; + size_t _nvdimm_size; size_t _noaccess_prefix; size_t _alignment; bool _special; int _fd_for_heap; + int _fd_for_nvdimm; private: bool _executable; @@ -63,7 +68,11 @@ // Accessors char* base() const { return _base; } + char* nvdimm_base() const { return _nvdimm_base; } + int nvdimm_fd() const { return _fd_for_nvdimm; } + void setup_fd(int fd) {_fd_for_nvdimm = fd; } size_t size() const { return _size; } + size_t nvdimm_size() const { return _nvdimm_size; } char* end() const { return _base + _size; } size_t alignment() const { return _alignment; } bool special() const { return _special; } @@ -114,6 +123,7 @@ void initialize_compressed_heap(const size_t size, size_t alignment, bool large); // Create protection page at the beginning of the space. void establish_noaccess_prefix(); + void initialize_g1gc_nvdimm_dram_sizes(size_t size, size_t alignment); public: // Constructor. Tries to find a heap that is good for compressed oops. // heap_allocation_directory is the path to the backing memory for Java heap. When set, Java heap will be allocated --- old/share/runtime/globals.hpp 2018-06-12 04:14:37.875190883 -0700 +++ new/share/runtime/globals.hpp 2018-06-12 04:14:37.847190882 -0700 @@ -2634,6 +2634,9 @@ "Path to the directoy where a temporary file will be created " \ "to use as the backing store for Java Heap.") \ \ + product(ccstr, AllocateOldGenAt, NULL, \ + "OldGen mount point where Old gen heap will be mapped.." 
) \ + \ develop(bool, VerifyMetaspace, false, \ "Verify metaspace on chunk movements.") \ \ --- old/share/runtime/java.cpp 2018-06-12 04:14:38.119190889 -0700 +++ new/share/runtime/java.cpp 2018-06-12 04:14:38.079190888 -0700 @@ -592,6 +592,16 @@ void vm_shutdown() { + if (os::has_nvdimm() && UseParallelOldGC) { + // all expand/shrinks for UseParallelOldGC need nvdimm_fd to + // remain open so that more/less sized can be committed to nvdimm +#if defined(_WINDOWS) + // Do nothing as file is already closed as all the memory for OldGen was + // mapped in one shot for NVDIMM. +#else + os::close(os::nvdimm_fd()); +#endif + } vm_perform_shutdown_actions(); os::wait_for_keypress_at_exit(); os::shutdown(); --- old/share/runtime/os.cpp 2018-06-12 04:14:38.503190900 -0700 +++ new/share/runtime/os.cpp 2018-06-12 04:14:38.459190899 -0700 @@ -76,6 +76,11 @@ uintptr_t os::_serialize_page_mask = 0; volatile unsigned int os::_rand_seed = 1; int os::_processor_count = 0; +bool os::_nvdimm_present = false; +int os::_nvdimm_fd = -1; +address os::_dram_heap_base = NULL; +address os::_nvdimm_heap_base = NULL; +uint os::_nvdimm_region_length = 0; int os::_initial_active_processor_count = 0; size_t os::_page_sizes[os::page_sizes_max]; @@ -1746,6 +1751,17 @@ return res; } +bool os::commit_memory(char* addr, size_t size, bool executable, int file_desc, size_t offset) { + + bool res = false; + + if (os::map_memory_to_file(addr, size, file_desc, offset, executable, false) != NULL) { + MemTracker::record_virtual_memory_commit((address)addr, size, CALLER_PC); + return true; + } + return false; +} + bool os::commit_memory(char* addr, size_t size, size_t alignment_hint, bool executable) { bool res = os::pd_commit_memory(addr, size, alignment_hint, executable); @@ -1755,6 +1771,18 @@ return res; } +bool os::commit_memory(char* addr, size_t size, size_t alignment_hint, + bool executable, int file_desc, size_t offset) { + + // ignoring alignment hint. 
+ bool res = false; + if (os::map_memory_to_file(addr, size, file_desc, offset, executable, false) != NULL) { + MemTracker::record_virtual_memory_commit((address)addr, size, CALLER_PC); + return true; + } + return false; +} + void os::commit_memory_or_exit(char* addr, size_t bytes, bool executable, const char* mesg) { pd_commit_memory_or_exit(addr, bytes, executable, mesg); --- old/share/runtime/os.hpp 2018-06-12 04:14:38.895190911 -0700 +++ new/share/runtime/os.hpp 2018-06-12 04:14:38.847190909 -0700 @@ -228,6 +228,24 @@ // in place until called after initialization has ocurred. return AssumeMP || (_processor_count != 1); } + static inline bool has_nvdimm() { + // This is set AFTER memory is successfully mapped on NVDIMM's + // DAX filesystem + return _nvdimm_present; + } + static inline int nvdimm_fd() { + // ParallelOldGC adaptive sizing requires nvdimm fd. + return _nvdimm_fd; + } + static inline address dram_heapbase() { + return _dram_heap_base; + } + static inline address nvdimm_heapbase() { + return _nvdimm_heap_base; + } + static inline uint nvdimm_regionlength() { + return _nvdimm_region_length; + } static julong available_memory(); static julong physical_memory(); static bool has_allocatable_memory_limit(julong* limit); @@ -243,6 +261,12 @@ } static void set_processor_count(int count) { _processor_count = count; } + static void set_nvdimm_present(bool status) { _nvdimm_present = status; } + static void set_nvdimm_fd(int fd) { _nvdimm_fd = fd; } + static void set_dram_heapbase(address base) {_dram_heap_base = base; } + static void set_nvdimm_heapbase(address base) {_nvdimm_heap_base = base; } + static void set_nvdimm_regionlength(uint length) {_nvdimm_region_length = length; } + // Returns the number of CPUs this process is currently allowed to run on. // Note that on some OSes this can change dynamically. 
static int active_processor_count(); @@ -331,8 +355,11 @@ static void split_reserved_memory(char *base, size_t size, size_t split, bool realloc); static bool commit_memory(char* addr, size_t bytes, bool executable); + static bool commit_memory(char* addr, size_t bytes, bool executable, int file_desc, size_t offset = 0); static bool commit_memory(char* addr, size_t size, size_t alignment_hint, bool executable); + static bool commit_memory(char* addr, size_t size, size_t alignment_hint, + bool executable, int file_desc, size_t offset = 0); // Same as commit_memory() that either succeeds or calls // vm_exit_out_of_memory() with the specified mesg. static void commit_memory_or_exit(char* addr, size_t bytes, @@ -363,7 +390,8 @@ static int create_file_for_heap(const char* dir); // Map memory to the file referred by fd. This function is slightly different from map_memory() // and is added to be used for implementation of -XX:AllocateHeapAt - static char* map_memory_to_file(char* base, size_t size, int fd); + static char* map_memory_to_file(char* base, size_t size, int fd, int offset = 0, bool exec = false, bool allocate = true); + static int allocate_file(int file_desc, size_t size); // Replace existing reserved memory with file mapping static char* replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd); @@ -1017,7 +1045,11 @@ char fileSep, char pathSep); static bool set_boot_path(char fileSep, char pathSep); - + static bool _nvdimm_present; + static int _nvdimm_fd; + static address _dram_heap_base; + static address _nvdimm_heap_base; + static uint _nvdimm_region_length; }; #ifndef _WINDOWS