/*
 * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
22 */ 23 24 #include "precompiled.hpp" 25 #include "gc/z/zAddress.inline.hpp" 26 #include "gc/z/zGlobals.hpp" 27 #include "gc/z/zLargePages.inline.hpp" 28 #include "gc/z/zPhysicalMemory.inline.hpp" 29 #include "gc/z/zPhysicalMemoryBacking_bsd.hpp" 30 #include "runtime/globals.hpp" 31 #include "runtime/init.hpp" 32 #include "runtime/os.hpp" 33 #include "utilities/align.hpp" 34 #include "utilities/debug.hpp" 35 36 bool ZPhysicalMemoryBacking::is_initialized() const { 37 return _file.is_initialized(); 38 } 39 40 void ZPhysicalMemoryBacking::warn_commit_limits(size_t max) const { 41 // Does nothing 42 } 43 44 bool ZPhysicalMemoryBacking::supports_uncommit() { 45 assert(!is_init_completed(), "Invalid state"); 46 assert(_file.size() >= ZGranuleSize, "Invalid size"); 47 48 // Test if uncommit is supported by uncommitting and then re-committing a granule 49 return commit(uncommit(ZGranuleSize)) == ZGranuleSize; 50 } 51 52 size_t ZPhysicalMemoryBacking::commit(size_t size) { 53 size_t committed = 0; 54 55 // Fill holes in the backing file 56 while (committed < size) { 57 size_t allocated = 0; 58 const size_t remaining = size - committed; 59 const uintptr_t start = _uncommitted.alloc_from_front_at_most(remaining, &allocated); 60 if (start == UINTPTR_MAX) { 61 // No holes to commit 62 break; 63 } 64 65 // Try commit hole 66 const size_t filled = _file.commit(start, allocated); 67 if (filled > 0) { 68 // Successful or partialy successful 69 _committed.free(start, filled); 70 committed += filled; 71 } 72 if (filled < allocated) { 73 // Failed or partialy failed 74 _uncommitted.free(start + filled, allocated - filled); 75 return committed; 76 } 77 } 78 79 // Expand backing file 80 if (committed < size) { 81 const size_t remaining = size - committed; 82 const uintptr_t start = _file.size(); 83 const size_t expanded = _file.commit(start, remaining); 84 if (expanded > 0) { 85 // Successful or partialy successful 86 _committed.free(start, expanded); 87 committed += expanded; 88 } 89 
} 90 91 return committed; 92 } 93 94 size_t ZPhysicalMemoryBacking::uncommit(size_t size) { 95 size_t uncommitted = 0; 96 97 // Punch holes in backing file 98 while (uncommitted < size) { 99 size_t allocated = 0; 100 const size_t remaining = size - uncommitted; 101 const uintptr_t start = _committed.alloc_from_back_at_most(remaining, &allocated); 102 assert(start != UINTPTR_MAX, "Allocation should never fail"); 103 104 // Try punch hole 105 const size_t punched = _file.uncommit(start, allocated); 106 if (punched > 0) { 107 // Successful or partialy successful 108 _uncommitted.free(start, punched); 109 uncommitted += punched; 110 } 111 if (punched < allocated) { 112 // Failed or partialy failed 113 _committed.free(start + punched, allocated - punched); 114 return uncommitted; 115 } 116 } 117 118 return uncommitted; 119 } 120 121 ZPhysicalMemory ZPhysicalMemoryBacking::alloc(size_t size) { 122 assert(is_aligned(size, ZGranuleSize), "Invalid size"); 123 124 ZPhysicalMemory pmem; 125 126 // Allocate segments 127 for (size_t allocated = 0; allocated < size; allocated += ZGranuleSize) { 128 const uintptr_t start = _committed.alloc_from_front(ZGranuleSize); 129 assert(start != UINTPTR_MAX, "Allocation should never fail"); 130 pmem.add_segment(ZPhysicalMemorySegment(start, ZGranuleSize)); 131 } 132 133 return pmem; 134 } 135 136 void ZPhysicalMemoryBacking::free(const ZPhysicalMemory& pmem) { 137 const size_t nsegments = pmem.nsegments(); 138 139 // Free segments 140 for (size_t i = 0; i < nsegments; i++) { 141 const ZPhysicalMemorySegment& segment = pmem.segment(i); 142 _committed.free(segment.start(), segment.size()); 143 } 144 } 145 146 void ZPhysicalMemoryBacking::pretouch_view(uintptr_t addr, size_t size) const { 147 const size_t page_size = ZLargePages::is_explicit() ? 
ZGranuleSize : os::vm_page_size(); 148 os::pretouch_memory((void*)addr, (void*)(addr + size), page_size); 149 } 150 151 void ZPhysicalMemoryBacking::map_view(const ZPhysicalMemory& pmem, uintptr_t addr) const { 152 const size_t nsegments = pmem.nsegments(); 153 size_t size = 0; 154 155 // Map segments 156 for (size_t i = 0; i < nsegments; i++) { 157 const ZPhysicalMemorySegment& segment = pmem.segment(i); 158 const uintptr_t segment_addr = addr + size; 159 _file.map(segment_addr, segment.size(), segment.start()); 160 size += segment.size(); 161 } 162 } 163 164 void ZPhysicalMemoryBacking::unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const { 165 _file.unmap(addr, pmem.size()); 166 } 167 168 uintptr_t ZPhysicalMemoryBacking::nmt_address(uintptr_t offset) const { 169 // From an NMT point of view we treat the first heap view (marked0) as committed 170 return ZAddress::marked0(offset); 171 } 172 173 void ZPhysicalMemoryBacking::pretouch(uintptr_t offset, size_t size) const { 174 if (ZVerifyViews) { 175 // Pre-touch good view 176 pretouch_view(ZAddress::good(offset), size); 177 } else { 178 // Pre-touch all views 179 pretouch_view(ZAddress::marked0(offset), size); 180 pretouch_view(ZAddress::marked1(offset), size); 181 pretouch_view(ZAddress::remapped(offset), size); 182 } 183 } 184 185 void ZPhysicalMemoryBacking::map(const ZPhysicalMemory& pmem, uintptr_t offset) const { 186 if (ZVerifyViews) { 187 // Map good view 188 map_view(pmem, ZAddress::good(offset)); 189 } else { 190 // Map all views 191 map_view(pmem, ZAddress::marked0(offset)); 192 map_view(pmem, ZAddress::marked1(offset)); 193 map_view(pmem, ZAddress::remapped(offset)); 194 } 195 } 196 197 void ZPhysicalMemoryBacking::unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const { 198 if (ZVerifyViews) { 199 // Unmap good view 200 unmap_view(pmem, ZAddress::good(offset)); 201 } else { 202 // Unmap all views 203 unmap_view(pmem, ZAddress::marked0(offset)); 204 unmap_view(pmem, 
ZAddress::marked1(offset)); 205 unmap_view(pmem, ZAddress::remapped(offset)); 206 } 207 } 208 209 void ZPhysicalMemoryBacking::debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const { 210 // Map good view 211 assert(ZVerifyViews, "Should be enabled"); 212 map_view(pmem, ZAddress::good(offset)); 213 } 214 215 void ZPhysicalMemoryBacking::debug_unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const { 216 // Unmap good view 217 assert(ZVerifyViews, "Should be enabled"); 218 unmap_view(pmem, ZAddress::good(offset)); 219 }