/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#ifndef SHARE_GC_Z_ZPAGE_INLINE_HPP
#define SHARE_GC_Z_ZPAGE_INLINE_HPP

#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zLiveMap.inline.hpp"
#include "gc/z/zMark.hpp"
#include "gc/z/zNUMA.hpp"
#include "gc/z/zPage.hpp"
#include "gc/z/zPhysicalMemory.inline.hpp"
#include "gc/z/zVirtualMemory.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"

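// Map an allocation size to a page type. Only the exact small and medium
// page sizes map to their respective types; any other size is treated as
// a large page.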
inline uint8_t ZPage::type_from_size(size_t size) const {
  switch (size) {
  case ZPageSizeSmall:
    return ZPageTypeSmall;

  case ZPageSizeMedium:
    return ZPageTypeMedium;

  default:
    return ZPageTypeLarge;
  }
}

inline const char* ZPage::type_to_string() const {
  switch (type()) {
  case ZPageTypeSmall:
    return "Small";

  case ZPageTypeMedium:
    return "Medium";

  default:
    assert(type() == ZPageTypeLarge, "Invalid page type");
    return "Large";
  }
}

inline uint32_t ZPage::object_max_count() const {
  switch (type()) {
  case ZPageTypeLarge:
    // A large page can only contain a single
    // object aligned to the start of the page.
    return 1;

  default:
    return (uint32_t)(size() >> object_alignment_shift());
  }
}

inline size_t ZPage::object_alignment_shift() const {
  switch (type()) {
  case ZPageTypeSmall:
    return ZObjectAlignmentSmallShift;

  case ZPageTypeMedium:
    return ZObjectAlignmentMediumShift;

  default:
    assert(type() == ZPageTypeLarge, "Invalid page type");
    return ZObjectAlignmentLargeShift;
  }
}

inline size_t ZPage::object_alignment() const {
  switch (type()) {
  case ZPageTypeSmall:
    return ZObjectAlignmentSmall;

  case ZPageTypeMedium:
    return ZObjectAlignmentMedium;

  default:
    assert(type() == ZPageTypeLarge, "Invalid page type");
    return ZObjectAlignmentLarge;
  }
}

inline uint8_t ZPage::type() const {
  return _type;
}

inline uintptr_t ZPage::start() const {
  return _virtual.start();
}

inline uintptr_t ZPage::end() const {
  return _virtual.end();
}

inline size_t ZPage::size() const {
  return _virtual.size();
}

inline uintptr_t ZPage::top() const {
  return _top;
}

inline size_t ZPage::remaining() const {
  return end() - top();
}

inline const ZPhysicalMemory& ZPage::physical_memory() const {
  return _physical;
}

inline const ZVirtualMemory& ZPage::virtual_memory() const {
  return _virtual;
}

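// The NUMA id of the page's memory is determined lazily on first use and
// then cached; (uint8_t)-1 means "not yet determined".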
inline uint8_t ZPage::numa_id() {
  if (_numa_id == (uint8_t)-1) {
    _numa_id = (uint8_t)ZNUMA::memory_id(ZAddress::good(start()));
  }

  return _numa_id;
}

inline bool ZPage::is_allocating() const {
  return _seqnum == ZGlobalSeqNum;
}

inline bool ZPage::is_relocatable() const {
  return _seqnum < ZGlobalSeqNum;
}

inline bool ZPage::is_mapped() const {
  return _seqnum > 0;
}

inline void ZPage::set_pre_mapped() {
  // The _seqnum variable is also used to signal that the virtual and physical
  // memory has been mapped. So, we need to set it to non-zero when the memory
  // has been pre-mapped.
  _seqnum = 1;
}

inline uint64_t ZPage::last_used() const {
  return _last_used;
}

inline void ZPage::set_last_used() {
  _last_used = os::elapsedTime();
}

inline bool ZPage::is_in(uintptr_t addr) const {
  const uintptr_t offset = ZAddress::offset(addr);
  return offset >= start() && offset < top();
}

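// Block API: an address below the page's top is considered to be inside an
// object and maps to itself, anything at or above top maps to the current
// top address.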
inline uintptr_t ZPage::block_start(uintptr_t addr) const {
  if (block_is_obj(addr)) {
    return addr;
  } else {
    return ZAddress::good(top());
  }
}

inline bool ZPage::block_is_obj(uintptr_t addr) const {
  return ZAddress::offset(addr) < top();
}

inline bool ZPage::is_marked() const {
  assert(is_relocatable(), "Invalid page state");
  return _livemap.is_marked();
}

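// The live map holds two bits per object: the even bit records that the
// object is marked (live), the odd bit that it is strongly marked, i.e.
// reachable through a non-finalizable reference.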
inline bool ZPage::is_object_marked(uintptr_t addr) const {
  const size_t index = ((ZAddress::offset(addr) - start()) >> object_alignment_shift()) * 2;
  return _livemap.get(index);
}

inline bool ZPage::is_object_strongly_marked(uintptr_t addr) const {
  const size_t index = ((ZAddress::offset(addr) - start()) >> object_alignment_shift()) * 2;
  return _livemap.get(index + 1);
}

inline bool ZPage::is_object_live(uintptr_t addr) const {
  return is_allocating() || is_object_marked(addr);
}

inline bool ZPage::is_object_strongly_live(uintptr_t addr) const {
  return is_allocating() || is_object_strongly_marked(addr);
}

inline bool ZPage::mark_object(uintptr_t addr, bool finalizable, bool& inc_live) {
  assert(ZAddress::is_marked(addr), "Invalid address");
  assert(is_relocatable(), "Invalid page state");
  assert(is_in(addr), "Invalid address");

  // Set mark bit
  const size_t index = ((ZAddress::offset(addr) - start()) >> object_alignment_shift()) * 2;
  return _livemap.set_atomic(index, finalizable, inc_live);
}

inline void ZPage::inc_live_atomic(uint32_t objects, size_t bytes) {
  _livemap.inc_live_atomic(objects, bytes);
}

inline uint32_t ZPage::live_objects() const {
  assert(is_marked(), "Should be marked");
  return _livemap.live_objects();
}

inline size_t ZPage::live_bytes() const {
  assert(is_marked(), "Should be marked");
  return _livemap.live_bytes();
}

inline void ZPage::object_iterate(ObjectClosure* cl) {
  _livemap.iterate(cl, ZAddress::good(start()), object_alignment_shift());
}

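// Bump-pointer allocation within the page. Not safe for concurrent use;
// returns 0 if there is not enough space left on the page.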
inline uintptr_t ZPage::alloc_object(size_t size) {
  assert(is_allocating(), "Invalid state");

  const size_t aligned_size = align_up(size, object_alignment());
  const uintptr_t addr = top();
  const uintptr_t new_top = addr + aligned_size;

  if (new_top > end()) {
    // Not enough space left
    return 0;
  }

  _top = new_top;

  return ZAddress::good(addr);
}

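// Lock-free variant of alloc_object(), bumping _top with a CAS loop so that
// multiple threads can allocate from the same page concurrently.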
inline uintptr_t ZPage::alloc_object_atomic(size_t size) {
  assert(is_allocating(), "Invalid state");

  const size_t aligned_size = align_up(size, object_alignment());
  uintptr_t addr = top();

  for (;;) {
    const uintptr_t new_top = addr + aligned_size;
    if (new_top > end()) {
      // Not enough space left
      return 0;
    }

    const uintptr_t prev_top = Atomic::cmpxchg(new_top, &_top, addr);
    if (prev_top == addr) {
      // Success
      return ZAddress::good(addr);
    }

    // Retry
    addr = prev_top;
  }
}

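// Undo an allocation by moving top back, which only succeeds if the given
// object is the most recently allocated object on the page.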
inline bool ZPage::undo_alloc_object(uintptr_t addr, size_t size) {
  assert(is_allocating(), "Invalid state");

  const uintptr_t offset = ZAddress::offset(addr);
  const size_t aligned_size = align_up(size, object_alignment());
  const uintptr_t old_top = top();
  const uintptr_t new_top = old_top - aligned_size;

  if (new_top != offset) {
    // Failed to undo allocation, not the last allocated object
    return false;
  }

  _top = new_top;

  // Success
  return true;
}

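// Lock-free variant of undo_alloc_object(), retracting _top with a CAS loop.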
inline bool ZPage::undo_alloc_object_atomic(uintptr_t addr, size_t size) {
  assert(is_allocating(), "Invalid state");

  const uintptr_t offset = ZAddress::offset(addr);
  const size_t aligned_size = align_up(size, object_alignment());
  uintptr_t old_top = top();

  for (;;) {
    const uintptr_t new_top = old_top - aligned_size;
    if (new_top != offset) {
      // Failed to undo allocation, not the last allocated object
      return false;
    }

    const uintptr_t prev_top = Atomic::cmpxchg(new_top, &_top, old_top);
    if (prev_top == old_top) {
      // Success
      return true;
    }

    // Retry
    old_top = prev_top;
  }
}

#endif // SHARE_GC_Z_ZPAGE_INLINE_HPP