src/hotspot/share/gc/z/zPhysicalMemory.cpp
@@ -20,14 +20,19 @@
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
+#include "gc/z/zAddress.inline.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "gc/z/zLargePages.inline.hpp"
+#include "gc/z/zNUMA.inline.hpp"
#include "gc/z/zPhysicalMemory.inline.hpp"
-#include "logging/log.hpp"
-#include "memory/allocation.inline.hpp"
+#include "runtime/init.hpp"
+#include "runtime/os.hpp"
#include "services/memTracker.hpp"
+#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
ZPhysicalMemory::ZPhysicalMemory() :
    _nsegments(0),
@@ -137,60 +142,204 @@
void ZPhysicalMemoryManager::warn_commit_limits(size_t max) const {
  _backing.warn_commit_limits(max);
}

bool ZPhysicalMemoryManager::supports_uncommit() {
- return _backing.supports_uncommit();
+ assert(!is_init_completed(), "Invalid state");
+ assert(_backing.size() >= ZGranuleSize, "Invalid size");
+
+ // Test if uncommit is supported by uncommitting and then re-committing a granule
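+  // If both operations succeed, the granule ends up committed again and the
+  // backing memory is left unchanged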
+ return commit(uncommit(ZGranuleSize)) == ZGranuleSize;
}

void ZPhysicalMemoryManager::nmt_commit(const ZPhysicalMemory& pmem, uintptr_t offset) const {
- const uintptr_t addr = _backing.nmt_address(offset);
+ // From an NMT point of view we treat the first heap view (marked0) as committed
+ const uintptr_t addr = ZAddress::marked0(offset);
  const size_t size = pmem.size();
  MemTracker::record_virtual_memory_commit((void*)addr, size, CALLER_PC);
}

void ZPhysicalMemoryManager::nmt_uncommit(const ZPhysicalMemory& pmem, uintptr_t offset) const {
  if (MemTracker::tracking_level() > NMT_minimal) {
- const uintptr_t addr = _backing.nmt_address(offset);
+ const uintptr_t addr = ZAddress::marked0(offset);
    const size_t size = pmem.size();
    Tracker tracker(Tracker::uncommit);
    tracker.record((address)addr, size);
  }
}

size_t ZPhysicalMemoryManager::commit(size_t size) {
- return _backing.commit(size);
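+  // Previously uncommitted ranges are re-committed first; the backing memory
+  // is only expanded when no such ranges remain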
+ size_t committed = 0;
+
+ // Fill holes in the backing memory
+ while (committed < size) {
+ size_t allocated = 0;
+ const size_t remaining = size - committed;
+ const uintptr_t start = _uncommitted.alloc_from_front_at_most(remaining, &allocated);
+ if (start == UINTPTR_MAX) {
+ // No holes to commit
+ break;
+ }
+
+ // Try commit hole
+ const size_t filled = _backing.commit(start, allocated);
+ if (filled > 0) {
+      // Successful or partially successful
+ _committed.free(start, filled);
+ committed += filled;
+ }
+ if (filled < allocated) {
+      // Failed or partially failed
+ _uncommitted.free(start + filled, allocated - filled);
+ return committed;
+ }
+ }
+
+ // Expand backing memory
+ if (committed < size) {
+ const size_t remaining = size - committed;
+ const uintptr_t start = _backing.size();
+ const size_t expanded = _backing.commit(start, remaining);
+ if (expanded > 0) {
+      // Successful or partially successful
+ _committed.free(start, expanded);
+ committed += expanded;
+ }
+ }
+
+ return committed;
}

size_t ZPhysicalMemoryManager::uncommit(size_t size) {
- return _backing.uncommit(size);
+ size_t uncommitted = 0;
+
+ // Punch holes in backing memory
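+  // Ranges are taken from the back of the committed list, while commit()
+  // refills from the front, which tends to keep committed memory at low offsets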
+ while (uncommitted < size) {
+ size_t allocated = 0;
+ const size_t remaining = size - uncommitted;
+ const uintptr_t start = _committed.alloc_from_back_at_most(remaining, &allocated);
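+    // Assumes the caller never uncommits more than what is currently committed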
+ assert(start != UINTPTR_MAX, "Allocation should never fail");
+
+ // Try punch hole
+ const size_t punched = _backing.uncommit(start, allocated);
+ if (punched > 0) {
+      // Successful or partially successful
+ _uncommitted.free(start, punched);
+ uncommitted += punched;
+ }
+ if (punched < allocated) {
+      // Failed or partially failed
+ _committed.free(start + punched, allocated - punched);
+ return uncommitted;
+ }
+ }
+
+ return uncommitted;
}

ZPhysicalMemory ZPhysicalMemoryManager::alloc(size_t size) {
- return _backing.alloc(size);
+ assert(is_aligned(size, ZGranuleSize), "Invalid size");
+
+ ZPhysicalMemory pmem;
+
+ // Allocate segments
+ for (size_t allocated = 0; allocated < size; allocated += ZGranuleSize) {
+ const uintptr_t start = _committed.alloc_from_front(ZGranuleSize);
+ assert(start != UINTPTR_MAX, "Allocation should never fail");
+ pmem.add_segment(ZPhysicalMemorySegment(start, ZGranuleSize));
+ }
+
+ return pmem;
}

void ZPhysicalMemoryManager::free(const ZPhysicalMemory& pmem) {
- _backing.free(pmem);
+ const size_t nsegments = pmem.nsegments();
+
+ // Free segments
+ for (size_t i = 0; i < nsegments; i++) {
+ const ZPhysicalMemorySegment& segment = pmem.segment(i);
+ _committed.free(segment.start(), segment.size());
+ }
+}
+
+void ZPhysicalMemoryManager::pretouch_view(uintptr_t addr, size_t size) const {
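+  // Touch the memory one page at a time to fault it in up front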
+ const size_t page_size = ZLargePages::is_explicit() ? ZGranuleSize : os::vm_page_size();
+ os::pretouch_memory((void*)addr, (void*)(addr + size), page_size);
+}
+
+void ZPhysicalMemoryManager::map_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
+ const size_t nsegments = pmem.nsegments();
+ size_t size = 0;
+
+ // Map segments
+ for (size_t i = 0; i < nsegments; i++) {
+ const ZPhysicalMemorySegment& segment = pmem.segment(i);
+ _backing.map(addr + size, segment.size(), segment.start());
+ size += segment.size();
+ }
+
+ // Setup NUMA interleaving
+ if (ZNUMA::is_enabled()) {
+ os::numa_make_global((char*)addr, size);
+ }
+
+ // Setup transparent large pages
+ if (ZLargePages::is_transparent()) {
+ os::realign_memory((char*)addr, size, os::large_page_size());
+ }
+}
+
+void ZPhysicalMemoryManager::unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
+ _backing.unmap(addr, pmem.size());
}

void ZPhysicalMemoryManager::pretouch(uintptr_t offset, size_t size) const {
- _backing.pretouch(offset, size);
+ if (ZVerifyViews) {
+ // Pre-touch good view
+ pretouch_view(ZAddress::good(offset), size);
+ } else {
+ // Pre-touch all views
+ pretouch_view(ZAddress::marked0(offset), size);
+ pretouch_view(ZAddress::marked1(offset), size);
+ pretouch_view(ZAddress::remapped(offset), size);
+ }
}

void ZPhysicalMemoryManager::map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
- _backing.map(pmem, offset);
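+  // The physical memory is mapped into multiple virtual address views of the
+  // heap (marked0, marked1 and remapped). With ZVerifyViews enabled only the
+  // currently good view is kept mapped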
+ if (ZVerifyViews) {
+ // Map good view
+ map_view(pmem, ZAddress::good(offset));
+ } else {
+ // Map all views
+ map_view(pmem, ZAddress::marked0(offset));
+ map_view(pmem, ZAddress::marked1(offset));
+ map_view(pmem, ZAddress::remapped(offset));
+ }
+
  nmt_commit(pmem, offset);
}

void ZPhysicalMemoryManager::unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
  nmt_uncommit(pmem, offset);
- _backing.unmap(pmem, offset);
+
+ if (ZVerifyViews) {
+ // Unmap good view
+ unmap_view(pmem, ZAddress::good(offset));
+ } else {
+ // Unmap all views
+ unmap_view(pmem, ZAddress::marked0(offset));
+ unmap_view(pmem, ZAddress::marked1(offset));
+ unmap_view(pmem, ZAddress::remapped(offset));
+ }
}

void ZPhysicalMemoryManager::debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
- _backing.debug_map(pmem, offset);
+ // Map good view
+ assert(ZVerifyViews, "Should be enabled");
+ map_view(pmem, ZAddress::good(offset));
}

void ZPhysicalMemoryManager::debug_unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
- _backing.debug_unmap(pmem, offset);
+ // Unmap good view
+ assert(ZVerifyViews, "Should be enabled");
+ unmap_view(pmem, ZAddress::good(offset));
}