1 /* 2 * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 */

#ifndef SHARE_GC_Z_ZPAGE_INLINE_HPP
#define SHARE_GC_Z_ZPAGE_INLINE_HPP

#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zForwardingTable.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zLiveMap.inline.hpp"
#include "gc/z/zMark.hpp"
#include "gc/z/zNUMA.hpp"
#include "gc/z/zPage.hpp"
#include "gc/z/zPhysicalMemory.inline.hpp"
#include "gc/z/zVirtualMemory.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

// Human-readable name of this page's type, for logging/printing.
inline const char* ZPage::type_to_string() const {
  switch (type()) {
  case ZPageTypeSmall:
    return "Small";

  case ZPageTypeMedium:
    return "Medium";

  default:
    assert(type() == ZPageTypeLarge, "Invalid page type");
    return "Large";
  }
}

// Upper bound on the number of objects this page can hold; used e.g. to
// size/verify the forwarding table (see verify_forwarding()).
inline uint32_t ZPage::object_max_count() const {
  switch (type()) {
  case ZPageTypeLarge:
    // A large page can only contain a single
    // object aligned to the start of the page.
    return 1;

  default:
    return (uint32_t)(size() >> object_alignment_shift());
  }
}

// Log2 of the object alignment used for allocations in this page.
inline size_t ZPage::object_alignment_shift() const {
  switch (type()) {
  case ZPageTypeSmall:
    return ZObjectAlignmentSmallShift;

  case ZPageTypeMedium:
    return ZObjectAlignmentMediumShift;

  default:
    assert(type() == ZPageTypeLarge, "Invalid page type");
    return ZObjectAlignmentLargeShift;
  }
}

// Object alignment (in bytes) used for allocations in this page.
inline size_t ZPage::object_alignment() const {
  switch (type()) {
  case ZPageTypeSmall:
    return ZObjectAlignmentSmall;

  case ZPageTypeMedium:
    return ZObjectAlignmentMedium;

  default:
    assert(type() == ZPageTypeLarge, "Invalid page type");
    return ZObjectAlignmentLarge;
  }
}

inline uint8_t ZPage::type() const {
  return _type;
}

// Page boundaries, expressed in heap-offset space (not colored addresses;
// compare is_in(), which strips the color via ZAddress::offset() first).
inline uintptr_t ZPage::start() const {
  return _virtual.start();
}

inline uintptr_t ZPage::end() const {
  return _virtual.end();
}

inline size_t ZPage::size() const {
  return _virtual.size();
}

// Current allocation top; allocated objects occupy [start(), top()).
inline uintptr_t ZPage::top() const {
  return _top;
}

// Bytes still available for allocation in this page.
inline size_t ZPage::remaining() const {
  return end() - top();
}

inline ZPhysicalMemory& ZPage::physical_memory() {
  return _physical;
}

inline const ZVirtualMemory& ZPage::virtual_memory() const {
  return _virtual;
}

// NUMA node id of this page's memory. Looked up lazily on first call and
// cached; (uint8_t)-1 is the "not yet determined" sentinel.
// NOTE(review): the lazy store is not atomic — presumably benign if racy
// (same value recomputed); confirm against callers.
inline uint8_t ZPage::numa_id() {
  if (_numa_id == (uint8_t)-1) {
    _numa_id = (uint8_t)ZNUMA::memory_id(ZAddress::good(start()));
  }

  return _numa_id;
}

// Atomically increments the reference count, but only while it is non-zero.
// Returns false if the count has already reached zero — a dead page cannot
// be resurrected, and the caller must not use it.
inline bool ZPage::inc_refcount() {
  for (uint32_t prev_refcount = _refcount; prev_refcount > 0; prev_refcount = _refcount) {
    if (Atomic::cmpxchg(prev_refcount + 1, &_refcount, prev_refcount) == prev_refcount) {
      return true;
    }
  }
  return false;
}

// Atomically decrements the reference count. Returns true when this call
// dropped the last reference (count reached zero).
inline bool ZPage::dec_refcount() {
  assert(is_active(), "Should be active");
  return Atomic::sub(1u, &_refcount) == 0;
}

// Returns true if addr (a colored address) falls within this page's
// allocated range [start(), top()).
inline bool ZPage::is_in(uintptr_t addr) const {
  const uintptr_t offset = ZAddress::offset(addr);
  return offset >= start() && offset < top();
}

// Returns addr itself if it refers to an object (lies below the allocation
// top, see block_is_obj()), otherwise the good-colored allocation top.
inline uintptr_t ZPage::block_start(uintptr_t addr) const {
  if (block_is_obj(addr)) {
    return addr;
  } else {
    return ZAddress::good(top());
  }
}

// An address refers to an object iff its offset lies below the allocation top.
inline bool ZPage::block_is_obj(uintptr_t addr) const {
  return ZAddress::offset(addr) < top();
}

// A page is active while at least one reference to it is held.
inline bool ZPage::is_active() const {
  return _refcount > 0;
}

// Allocating: active and belonging to the current GC cycle (seqnum matches).
inline bool ZPage::is_allocating() const {
  return is_active() && _seqnum == ZGlobalSeqNum;
}

// Relocatable: active but created in an earlier GC cycle than the current one.
inline bool ZPage::is_relocatable() const {
  return is_active() && _seqnum < ZGlobalSeqNum;
}

// Detached: the physical memory has been taken away from this page.
inline bool ZPage::is_detached() const {
  return _physical.is_null();
}

// See set_pre_mapped(): a non-zero seqnum doubles as the "memory is mapped"
// signal.
inline bool ZPage::is_mapped() const {
  return _seqnum > 0;
}

inline void ZPage::set_pre_mapped() {
  // The _seqnum variable is also used to signal that the virtual and physical
  // memory has been mapped. So, we need to set it to non-zero when the memory
  // has been pre-mapped.
  _seqnum = 1;
}

// A pinned page is excluded from relocation; the flag is cleared again by
// reset_forwarding().
inline bool ZPage::is_pinned() const {
  return _pinned;
}

inline void ZPage::set_pinned() {
  _pinned = 1;
}

// A page carries forwarding information while it is being relocated.
inline bool ZPage::is_forwarding() const {
  return !_forwarding.is_null();
}

// Sets up the forwarding table, sized by the number of live objects recorded
// in the live map during marking.
inline void ZPage::set_forwarding() {
  assert(is_marked(), "Should be marked");
  _forwarding.setup(_livemap.live_objects());
}

inline void ZPage::reset_forwarding() {
  _forwarding.reset();
  _pinned = 0;
}

inline void ZPage::verify_forwarding() const {
  _forwarding.verify(object_max_count(), _livemap.live_objects());
}

inline bool ZPage::is_marked() const {
  assert(is_relocatable(), "Invalid page state");
  return _livemap.is_marked();
}

// The live map keeps two bits per object slot: bit (index) is the mark bit,
// bit (index + 1) is the strongly-reachable bit. The slot index is the
// object's alignment-granule offset from the start of the page.
inline bool ZPage::is_object_marked(uintptr_t addr) const {
  const size_t index = ((ZAddress::offset(addr) - start()) >> object_alignment_shift()) * 2;
  return _livemap.get(index);
}

inline bool ZPage::is_object_strongly_marked(uintptr_t addr) const {
  const size_t index = ((ZAddress::offset(addr) - start()) >> object_alignment_shift()) * 2;
  return _livemap.get(index + 1);
}

// Objects in a still-allocating page are considered live without consulting
// the live map; otherwise the mark bit decides.
inline bool ZPage::is_object_live(uintptr_t addr) const {
  return is_allocating() || is_object_marked(addr);
}

inline bool ZPage::is_object_strongly_live(uintptr_t addr) const {
  return is_allocating() || is_object_strongly_marked(addr);
}

// Attempts to mark the object at addr. finalizable selects finalizable vs.
// strong marking; inc_live is an out-parameter filled in by the live map
// (semantics defined by ZLiveMap::set_atomic()).
inline bool ZPage::mark_object(uintptr_t addr, bool finalizable, bool& inc_live) {
  assert(ZAddress::is_marked(addr), "Invalid address");
  assert(is_relocatable(), "Invalid page state");
  assert(is_in(addr), "Invalid address");

  // Set mark bit
  const size_t index = ((ZAddress::offset(addr) - start()) >> object_alignment_shift()) * 2;
  return _livemap.set_atomic(index, finalizable, inc_live);
}

// Adds to the live object/byte counters maintained by the live map.
inline void ZPage::inc_live_atomic(uint32_t objects, size_t bytes) {
  _livemap.inc_live_atomic(objects, bytes);
}

inline size_t ZPage::live_bytes() const {
  assert(is_marked(), "Should be marked");
  return _livemap.live_bytes();
}

// Applies cl to the live objects in this page, via the live map.
inline void ZPage::object_iterate(ObjectClosure* cl) {
  _livemap.iterate(cl, ZAddress::good(start()), object_alignment_shift());
}

// Bump-pointer allocation, non-atomic variant (caller must own the page).
// Returns the good-colored address of the new object, or 0 if the page does
// not have enough space left.
inline uintptr_t ZPage::alloc_object(size_t size) {
  assert(is_allocating(), "Invalid state");

  const size_t aligned_size = align_up(size, object_alignment());
  const uintptr_t addr = top();
  const uintptr_t new_top = addr + aligned_size;

  if (new_top > end()) {
    // Not enough space left
    return 0;
  }

  _top = new_top;

  return ZAddress::good(addr);
}

// Same as alloc_object(), but safe against concurrent allocators: the top
// pointer is advanced with a CAS loop, retrying from the observed top.
inline uintptr_t ZPage::alloc_object_atomic(size_t size) {
  assert(is_allocating(), "Invalid state");

  const size_t aligned_size = align_up(size, object_alignment());
  uintptr_t addr = top();

  for (;;) {
    const uintptr_t new_top = addr + aligned_size;
    if (new_top > end()) {
      // Not enough space left
      return 0;
    }

    const uintptr_t prev_top = Atomic::cmpxchg(new_top, &_top, addr);
    if (prev_top == addr) {
      // Success
      return ZAddress::good(addr);
    }

    // Retry
    addr = prev_top;
  }
}

// Undoes an allocation by moving the top pointer back, which is only
// possible if addr is the most recently allocated object. Non-atomic variant.
inline bool ZPage::undo_alloc_object(uintptr_t addr, size_t size) {
  assert(is_allocating(), "Invalid state");

  const uintptr_t offset = ZAddress::offset(addr);
  const size_t aligned_size = align_up(size, object_alignment());
  const uintptr_t old_top = top();
  const uintptr_t new_top = old_top - aligned_size;

  if (new_top != offset) {
    // Failed to undo allocation, not the last allocated object
    return false;
  }

  _top = new_top;

  // Success
  return true;
}

// Same as undo_alloc_object(), but safe against concurrent allocators
// (CAS loop on the top pointer).
inline bool ZPage::undo_alloc_object_atomic(uintptr_t addr, size_t size) {
  assert(is_allocating(), "Invalid state");

  const uintptr_t offset = ZAddress::offset(addr);
  const size_t aligned_size = align_up(size, object_alignment());
  uintptr_t old_top = top();

  for (;;) {
    const uintptr_t new_top = old_top - aligned_size;
    if (new_top != offset) {
      // Failed to undo allocation, not the last allocated object
      return false;
    }

    const uintptr_t prev_top = Atomic::cmpxchg(new_top, &_top, old_top);
    if (prev_top == old_top) {
      // Success
      return true;
    }

    // Retry
    old_top = prev_top;
  }
}

#endif // SHARE_GC_Z_ZPAGE_INLINE_HPP