
share/gc/parallel/psOldGen.cpp

rev 1 : G1GC+POGC+NVDIMM Patch with latest comments incorporated from all.

*** 70,83 ****
--- 70,98 ----
  }
  
  void PSOldGen::initialize_virtual_space(ReservedSpace rs, size_t alignment) {
  
    _virtual_space = new PSVirtualSpace(rs, alignment);
+   if (os::has_nvdimm() && UseParallelOldGC) {
+     if (!_virtual_space->expand_by(_init_gen_size, _nvdimm_fd)) {
+       vm_exit_during_initialization("Could not reserve enough space for "
+                                     "object heap");
+     }
+ #if defined (_WINDOWS)
+     // Windows OS does not support incremental mapping for DAX (NVDIMM) File System
+     if (os::has_nvdimm()) {
+       os::close(os::nvdimm_fd());
+     }
+ #endif
+     os::set_nvdimm_heapbase((address)(_virtual_space->reserved_low_addr()));
+     os::set_dram_heapbase((address)((char*)_virtual_space->reserved_low_addr() + _max_gen_size));
+   } else {
    if (!_virtual_space->expand_by(_init_gen_size)) {
      vm_exit_during_initialization("Could not reserve enough space for "
                                    "object heap");
    }
+   }
  }
  
  void PSOldGen::initialize_work(const char* perf_data_name, int level) {
    //
    // Basic memory initialization
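The hunk above makes old-gen initialization NVDIMM-aware: when an NVDIMM backing file is present and UseParallelOldGC is set, the generation's virtual space is committed through the file-descriptor overload of expand_by(), the NVDIMM and DRAM heap base addresses are recorded, and on Windows the fd is closed right away because DAX file systems there do not support incremental mapping. As a standalone illustration of the underlying idea (a heap segment backed by a file on a DAX mount), here is a minimal sketch; the path, size, and flow are assumptions for illustration only and are not the patch's PSVirtualSpace code:

  #include <fcntl.h>
  #include <sys/mman.h>
  #include <unistd.h>
  #include <cstddef>
  #include <cstdio>

  int main() {
    const size_t gen_size = 64 * 1024 * 1024;    // assumed initial old-gen size
    const char* path = "/mnt/pmem/jvm_old_gen";  // assumed DAX mount point

    // Open (or create) the backing file on the NVDIMM file system.
    int fd = open(path, O_RDWR | O_CREAT, 0600);
    if (fd < 0) { perror("open"); return 1; }

    // Size the file so the mapping has real backing store.
    if (posix_fallocate(fd, 0, gen_size) != 0) {
      perror("posix_fallocate"); close(fd); return 1;
    }

    // Map the file; on a DAX mount, loads and stores reach persistent
    // memory directly, with no page cache in between.
    void* base = mmap(nullptr, gen_size, PROT_READ | PROT_WRITE,
                      MAP_SHARED, fd, 0);
    if (base == MAP_FAILED) { perror("mmap"); close(fd); return 1; }

    printf("old gen mapped at %p (%zu bytes)\n", base, gen_size);

    munmap(base, gen_size);
    close(fd);
    return 0;
  }

On POSIX systems the fd can stay open for later incremental mappings, which is why the patch only closes it eagerly under _WINDOWS.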
*** 273,283 ****
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);
  if (bytes == 0) {
    return true; // That's what virtual_space()->expand_by(0) would return
  }
! bool result = virtual_space()->expand_by(bytes);
  if (result) {
    if (ZapUnusedHeapArea) {
      // We need to mangle the newly expanded area. The memregion spans
      // end -> new_end, we assume that top -> end is already mangled.
      // Do the mangling before post_resize() is called because
--- 288,303 ----
  assert_lock_strong(ExpandHeap_lock);
  assert_locked_or_safepoint(Heap_lock);
  if (bytes == 0) {
    return true; // That's what virtual_space()->expand_by(0) would return
  }
! bool result = false;
! if (os::has_nvdimm() && UseParallelOldGC) {
!   result = virtual_space()->expand_by(bytes, nvdimm_fd());
! } else {
!   result = virtual_space()->expand_by(bytes);
! }
  if (result) {
    if (ZapUnusedHeapArea) {
      // We need to mangle the newly expanded area. The memregion spans
      // end -> new_end, we assume that top -> end is already mangled.
      // Do the mangling before post_resize() is called because
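This second hunk routes every later expansion of the old generation through the same fd-backed path, so growth happens inside the NVDIMM file rather than in anonymous DRAM mappings. A plausible shape for such an fd-aware expansion, sketched with a hypothetical helper (expand_file_backed is not a JDK function) and assuming the full address range was reserved up front, as PSVirtualSpace does for the whole generation:

  #include <fcntl.h>
  #include <sys/mman.h>
  #include <sys/types.h>
  #include <cstddef>

  // Grow a file-backed mapping in place. `base` is the start of an address
  // range reserved up front; `committed` tracks how much of it is mapped so
  // far; `fd` is the open file on the DAX/NVDIMM file system.
  bool expand_file_backed(char* base, size_t& committed, size_t bytes, int fd) {
    // Extend the backing file first so the new pages have store behind them.
    if (posix_fallocate(fd, 0, (off_t)(committed + bytes)) != 0) {
      return false;
    }
    // Map the next chunk of the file directly after the committed region.
    // MAP_FIXED is safe here only because the whole range was reserved in
    // advance, so no unrelated mapping can be clobbered.
    void* p = mmap(base + committed, bytes, PROT_READ | PROT_WRITE,
                   MAP_SHARED | MAP_FIXED, fd, (off_t)committed);
    if (p == MAP_FAILED) {
      return false;
    }
    committed += bytes;
    return true;
  }

Keeping the file offset in lockstep with the committed size is what lets expansion stay incremental, and is presumably why the Windows path (which cannot remap a DAX file incrementally) has to map everything once at initialization instead.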