/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
22 */ 23 24 #ifndef SHARE_GC_Z_ZPAGE_INLINE_HPP 25 #define SHARE_GC_Z_ZPAGE_INLINE_HPP 26 27 #include "gc/z/zAddress.inline.hpp" 28 #include "gc/z/zGlobals.hpp" 29 #include "gc/z/zLiveMap.inline.hpp" 30 #include "gc/z/zMark.hpp" 31 #include "gc/z/zNUMA.hpp" 32 #include "gc/z/zPage.hpp" 33 #include "gc/z/zPhysicalMemory.inline.hpp" 34 #include "gc/z/zVirtualMemory.inline.hpp" 35 #include "oops/oop.inline.hpp" 36 #include "runtime/atomic.hpp" 37 #include "runtime/os.hpp" 38 #include "utilities/align.hpp" 39 #include "utilities/debug.hpp" 40 41 inline uint8_t ZPage::type_from_size(size_t size) const { 42 switch (size) { 43 case ZPageSizeSmall: 44 return ZPageTypeSmall; 45 46 case ZPageSizeMedium: 47 return ZPageTypeMedium; 48 49 default: 50 return ZPageTypeLarge; 51 } 52 } 53 54 inline const char* ZPage::type_to_string() const { 55 switch (type()) { 56 case ZPageTypeSmall: 57 return "Small"; 58 59 case ZPageTypeMedium: 60 return "Medium"; 61 62 default: 63 assert(type() == ZPageTypeLarge, "Invalid page type"); 64 return "Large"; 65 } 66 } 67 68 inline uint32_t ZPage::object_max_count() const { 69 switch (type()) { 70 case ZPageTypeLarge: 71 // A large page can only contain a single 72 // object aligned to the start of the page. 
73 return 1; 74 75 default: 76 return (uint32_t)(size() >> object_alignment_shift()); 77 } 78 } 79 80 inline size_t ZPage::object_alignment_shift() const { 81 switch (type()) { 82 case ZPageTypeSmall: 83 return ZObjectAlignmentSmallShift; 84 85 case ZPageTypeMedium: 86 return ZObjectAlignmentMediumShift; 87 88 default: 89 assert(type() == ZPageTypeLarge, "Invalid page type"); 90 return ZObjectAlignmentLargeShift; 91 } 92 } 93 94 inline size_t ZPage::object_alignment() const { 95 switch (type()) { 96 case ZPageTypeSmall: 97 return ZObjectAlignmentSmall; 98 99 case ZPageTypeMedium: 100 return ZObjectAlignmentMedium; 101 102 default: 103 assert(type() == ZPageTypeLarge, "Invalid page type"); 104 return ZObjectAlignmentLarge; 105 } 106 } 107 108 inline uint8_t ZPage::type() const { 109 return _type; 110 } 111 112 inline uintptr_t ZPage::start() const { 113 return _virtual.start(); 114 } 115 116 inline uintptr_t ZPage::end() const { 117 return _virtual.end(); 118 } 119 120 inline size_t ZPage::size() const { 121 return _virtual.size(); 122 } 123 124 inline uintptr_t ZPage::top() const { 125 return _top; 126 } 127 128 inline size_t ZPage::remaining() const { 129 return end() - top(); 130 } 131 132 inline const ZPhysicalMemory& ZPage::physical_memory() const { 133 return _physical; 134 } 135 136 inline const ZVirtualMemory& ZPage::virtual_memory() const { 137 return _virtual; 138 } 139 140 inline uint8_t ZPage::numa_id() { 141 if (_numa_id == (uint8_t)-1) { 142 _numa_id = (uint8_t)ZNUMA::memory_id(ZAddress::good(start())); 143 } 144 145 return _numa_id; 146 } 147 148 inline bool ZPage::is_allocating() const { 149 return _seqnum >= ZGlobalSeqNum; 150 } 151 152 inline void ZPage::pin_allocating() { 153 _seqnum = (uint32_t)-1; 154 } 155 156 inline void ZPage::unpin_allocating() { 157 _seqnum = ZGlobalSeqNum; 158 } 159 160 inline bool ZPage::is_relocatable() const { 161 return _seqnum < ZGlobalSeqNum; 162 } 163 164 inline bool ZPage::is_mapped() const { 165 return _seqnum > 
0; 166 } 167 168 inline void ZPage::set_pre_mapped() { 169 // The _seqnum variable is also used to signal that the virtual and physical 170 // memory has been mapped. So, we need to set it to non-zero when the memory 171 // has been pre-mapped. 172 _seqnum = 1; 173 } 174 175 inline uint64_t ZPage::last_used() const { 176 return _last_used; 177 } 178 179 inline void ZPage::set_last_used() { 180 _last_used = os::elapsedTime(); 181 } 182 183 inline bool ZPage::is_in(uintptr_t addr) const { 184 const uintptr_t offset = ZAddress::offset(addr); 185 return offset >= start() && offset < top(); 186 } 187 188 inline uintptr_t ZPage::block_start(uintptr_t addr) const { 189 if (block_is_obj(addr)) { 190 return addr; 191 } else { 192 return ZAddress::good(top()); 193 } 194 } 195 196 inline bool ZPage::block_is_obj(uintptr_t addr) const { 197 return ZAddress::offset(addr) < top(); 198 } 199 200 inline bool ZPage::is_marked() const { 201 assert(is_relocatable(), "Invalid page state"); 202 return _livemap.is_marked(); 203 } 204 205 inline bool ZPage::is_object_marked(uintptr_t addr) const { 206 const size_t index = ((ZAddress::offset(addr) - start()) >> object_alignment_shift()) * 2; 207 return _livemap.get(index); 208 } 209 210 inline bool ZPage::is_object_strongly_marked(uintptr_t addr) const { 211 const size_t index = ((ZAddress::offset(addr) - start()) >> object_alignment_shift()) * 2; 212 return _livemap.get(index + 1); 213 } 214 215 inline bool ZPage::is_object_live(uintptr_t addr) const { 216 return is_allocating() || is_object_marked(addr); 217 } 218 219 inline bool ZPage::is_object_strongly_live(uintptr_t addr) const { 220 return is_allocating() || is_object_strongly_marked(addr); 221 } 222 223 inline bool ZPage::mark_object(uintptr_t addr, bool finalizable, bool& inc_live) { 224 assert(ZAddress::is_marked(addr), "Invalid address"); 225 assert(is_relocatable(), "Invalid page state"); 226 assert(is_in(addr), "Invalid address"); 227 228 // Set mark bit 229 const size_t 
index = ((ZAddress::offset(addr) - start()) >> object_alignment_shift()) * 2; 230 return _livemap.set_atomic(index, finalizable, inc_live); 231 } 232 233 inline void ZPage::inc_live_atomic(uint32_t objects, size_t bytes) { 234 _livemap.inc_live_atomic(objects, bytes); 235 } 236 237 inline uint32_t ZPage::live_objects() const { 238 assert(is_marked(), "Should be marked"); 239 return _livemap.live_objects(); 240 } 241 242 inline size_t ZPage::live_bytes() const { 243 assert(is_marked(), "Should be marked"); 244 return _livemap.live_bytes(); 245 } 246 247 inline void ZPage::object_iterate(ObjectClosure* cl) { 248 _livemap.iterate(cl, ZAddress::good(start()), object_alignment_shift()); 249 } 250 251 inline uintptr_t ZPage::alloc_object(size_t size) { 252 assert(is_allocating(), "Invalid state"); 253 254 const size_t aligned_size = align_up(size, object_alignment()); 255 const uintptr_t addr = top(); 256 const uintptr_t new_top = addr + aligned_size; 257 258 if (new_top > end()) { 259 // Not enough space left 260 return 0; 261 } 262 263 _top = new_top; 264 265 return ZAddress::good(addr); 266 } 267 268 inline uintptr_t ZPage::alloc_object_atomic(size_t size) { 269 assert(is_allocating(), "Invalid state"); 270 271 const size_t aligned_size = align_up(size, object_alignment()); 272 uintptr_t addr = top(); 273 274 for (;;) { 275 const uintptr_t new_top = addr + aligned_size; 276 if (new_top > end()) { 277 // Not enough space left 278 return 0; 279 } 280 281 const uintptr_t prev_top = Atomic::cmpxchg(new_top, &_top, addr); 282 if (prev_top == addr) { 283 // Success 284 return ZAddress::good(addr); 285 } 286 287 // Retry 288 addr = prev_top; 289 } 290 } 291 292 inline bool ZPage::undo_alloc_object(uintptr_t addr, size_t size) { 293 assert(is_allocating(), "Invalid state"); 294 295 const uintptr_t offset = ZAddress::offset(addr); 296 const size_t aligned_size = align_up(size, object_alignment()); 297 const uintptr_t old_top = top(); 298 const uintptr_t new_top = old_top - 
aligned_size; 299 300 if (new_top != offset) { 301 // Failed to undo allocation, not the last allocated object 302 return false; 303 } 304 305 _top = new_top; 306 307 // Success 308 return true; 309 } 310 311 inline bool ZPage::undo_alloc_object_atomic(uintptr_t addr, size_t size) { 312 assert(is_allocating(), "Invalid state"); 313 314 const uintptr_t offset = ZAddress::offset(addr); 315 const size_t aligned_size = align_up(size, object_alignment()); 316 uintptr_t old_top = top(); 317 318 for (;;) { 319 const uintptr_t new_top = old_top - aligned_size; 320 if (new_top != offset) { 321 // Failed to undo allocation, not the last allocated object 322 return false; 323 } 324 325 const uintptr_t prev_top = Atomic::cmpxchg(new_top, &_top, old_top); 326 if (prev_top == old_top) { 327 // Success 328 return true; 329 } 330 331 // Retry 332 old_top = prev_top; 333 } 334 } 335 336 #endif // SHARE_GC_Z_ZPAGE_INLINE_HPP