1 /*
   2  * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_GC_SHARED_OOPSTORAGE_INLINE_HPP
  26 #define SHARE_GC_SHARED_OOPSTORAGE_INLINE_HPP
  27 
  28 #include "gc/shared/oopStorage.hpp"
  29 #include "metaprogramming/conditional.hpp"
  30 #include "metaprogramming/isConst.hpp"
  31 #include "oops/oop.hpp"
  32 #include "runtime/safepoint.hpp"
  33 #include "utilities/align.hpp"
  34 #include "utilities/count_trailing_zeros.hpp"
  35 #include "utilities/debug.hpp"
  36 #include "utilities/globalDefinitions.hpp"
  37 #include "utilities/macros.hpp"
  38 
// Array of all active blocks.  Refcounted for lock-free reclaim of
// old array when a new array is allocated for expansion.
class OopStorage::ActiveArray {
  friend class OopStorage::TestAccess;

  size_t _size;                   // Number of Block* slots available.
  volatile size_t _block_count;   // Number of slots in use.
  mutable volatile int _refcount; // Reference count for reclamation.
  // Block* _blocks[1];            // Pseudo flexible array member.

  ActiveArray(size_t size);
  ~ActiveArray();

  NONCOPYABLE(ActiveArray);

  // Byte offset from 'this' to the trailing array of Block*.
  static size_t blocks_offset();
  // Start of the trailing array of Block*.
  Block* const* base_ptr() const;

  // Address of the index'th element of the trailing array.
  Block* const* block_ptr(size_t index) const;
  Block** block_ptr(size_t index);

public:
  static ActiveArray* create(size_t size, AllocFailType alloc_fail = AllocFailStrategy::EXIT_OOM);
  static void destroy(ActiveArray* ba);

  // Element access.  precondition: i < block_count().
  inline Block* at(size_t i) const;

  size_t size() const;
  size_t block_count() const;
  size_t block_count_acquire() const;
  void increment_refcount() const;
  bool decrement_refcount() const; // Return true if zero, otherwise false

  // Support for OopStorage::allocate.
  // Add block to the end of the array.  Updates block count at the
  // end of the operation, with a release_store. Returns true if the
  // block was added, false if there was no room available.
  // precondition: owner's _allocation_mutex is locked, or at safepoint.
  bool push(Block* block);

  // Support OopStorage::delete_empty_blocks_xxx operations.
  // Remove block from the array.
  // precondition: block must be present at its active_index element.
  void remove(Block* block);

  // Copy the block pointers and count from another array.
  void copy_from(const ActiveArray* from);
};
  86 
  87 inline size_t OopStorage::ActiveArray::blocks_offset() {
  88   return align_up(sizeof(ActiveArray), sizeof(Block*));
  89 }
  90 
  91 inline OopStorage::Block* const* OopStorage::ActiveArray::base_ptr() const {
  92   const void* ptr = reinterpret_cast<const char*>(this) + blocks_offset();
  93   return reinterpret_cast<Block* const*>(ptr);
  94 }
  95 
  96 inline OopStorage::Block* const* OopStorage::ActiveArray::block_ptr(size_t index) const {
  97   return base_ptr() + index;
  98 }
  99 
 100 inline OopStorage::Block** OopStorage::ActiveArray::block_ptr(size_t index) {
 101   return const_cast<Block**>(base_ptr() + index);
 102 }
 103 
 104 inline OopStorage::Block* OopStorage::ActiveArray::at(size_t index) const {
 105   assert(index < _block_count, "precondition");
 106   return *block_ptr(index);
 107 }
 108 
// A Block has an embedded AllocationListEntry to provide the links between
// Blocks in an AllocationList.
class OopStorage::AllocationListEntry {
  friend class OopStorage::AllocationList;

  // Members are mutable, and we deal exclusively with pointers to
  // const, to make const blocks easier to use; a block being const
  // doesn't prevent modifying its list state.
  mutable const Block* _prev;   // Previous block in the list.
  mutable const Block* _next;   // Next block in the list.

  NONCOPYABLE(AllocationListEntry);

public:
  AllocationListEntry();
  ~AllocationListEntry();
};
 126 
// Fixed-sized array of oops, plus bookkeeping data.
// All blocks are in the storage's _active_array, at the block's _active_index.
// Non-full blocks are in the storage's _allocation_list, linked through the
// block's _allocation_list_entry.  Empty blocks are at the end of that list.
class OopStorage::Block /* No base class, to avoid messing up alignment. */ {
  // _data must be the first non-static data member, for alignment.
  oop _data[BitsPerWord];
  static const unsigned _data_pos = 0; // Position of _data.

  volatile uintx _allocated_bitmask; // One bit per _data element.
  intptr_t _owner_address;    // Identifies the owning OopStorage.
  void* _memory;              // Unaligned storage containing block.
  size_t _active_index;       // This block's index in the owner's _active_array.
  AllocationListEntry _allocation_list_entry; // Links for the owner's _allocation_list.
  Block* volatile _deferred_updates_next;     // Link for deferred-updates list.
  volatile uintx _release_refcount;           // In-progress release operations.

  Block(const OopStorage* owner, void* memory);
  ~Block();

  void check_index(unsigned index) const;
  unsigned get_index(const oop* ptr) const;

  // Shared const/non-const iteration; see iterate_impl definition below.
  template<typename F, typename BlockPtr>
  static bool iterate_impl(F f, BlockPtr b);

  NONCOPYABLE(Block);

public:
  const AllocationListEntry& allocation_list_entry() const;

  static size_t allocation_size();
  static size_t allocation_alignment_shift();

  oop* get_pointer(unsigned index);
  const oop* get_pointer(unsigned index) const;

  uintx bitmask_for_index(unsigned index) const;
  uintx bitmask_for_entry(const oop* ptr) const;

  // Allocation bitmask accessors are racy.
  bool is_full() const;
  bool is_empty() const;
  uintx allocated_bitmask() const;

  bool is_safe_to_delete() const;

  Block* deferred_updates_next() const;
  void set_deferred_updates_next(Block* new_next);

  bool contains(const oop* ptr) const;

  size_t active_index() const;
  void set_active_index(size_t index);
  static size_t active_index_safe(const Block* block); // Returns 0 if access fails.

  // Returns NULL if ptr is not in a block or not allocated in that block.
  static Block* block_for_ptr(const OopStorage* owner, const oop* ptr);

  oop* allocate();
  static Block* new_block(const OopStorage* owner);
  static void delete_block(const Block& block);

  void release_entries(uintx releasing, OopStorage* owner);

  // Apply f to each allocated entry; stops early if f returns false.
  template<typename F> bool iterate(F f);
  template<typename F> bool iterate(F f) const;
}; // class Block
 195 
// Mutable head of the list.  List links are stored as pointers to const
// (see AllocationListEntry), so cast that away for non-const access.
inline OopStorage::Block* OopStorage::AllocationList::head() {
  return const_cast<Block*>(_head);
}
 199 
// Mutable tail of the list; const stripped as in head().
inline OopStorage::Block* OopStorage::AllocationList::tail() {
  return const_cast<Block*>(_tail);
}
 203 
// Const access to the head of the list.
inline const OopStorage::Block* OopStorage::AllocationList::chead() const {
  return _head;
}
 207 
// Const access to the tail of the list.
inline const OopStorage::Block* OopStorage::AllocationList::ctail() const {
  return _tail;
}
 211 
// Predecessor of block in the list, taken from the block's embedded entry.
inline OopStorage::Block* OopStorage::AllocationList::prev(Block& block) {
  return const_cast<Block*>(block.allocation_list_entry()._prev);
}
 215 
// Successor of block in the list, taken from the block's embedded entry.
inline OopStorage::Block* OopStorage::AllocationList::next(Block& block) {
  return const_cast<Block*>(block.allocation_list_entry()._next);
}
 219 
// Const-qualified predecessor of block in the list.
inline const OopStorage::Block* OopStorage::AllocationList::prev(const Block& block) const {
  return block.allocation_list_entry()._prev;
}
 223 
// Const-qualified successor of block in the list.
inline const OopStorage::Block* OopStorage::AllocationList::next(const Block& block) const {
  return block.allocation_list_entry()._next;
}
 227 
 228 template<typename Closure>
 229 class OopStorage::OopFn {
 230 public:
 231   explicit OopFn(Closure* cl) : _cl(cl) {}
 232 
 233   template<typename OopPtr>     // [const] oop*
 234   bool operator()(OopPtr ptr) const {
 235     _cl->do_oop(ptr);
 236     return true;
 237   }
 238 
 239 private:
 240   Closure* _cl;
 241 };
 242 
 243 template<typename Closure>
 244 inline OopStorage::OopFn<Closure> OopStorage::oop_fn(Closure* cl) {
 245   return OopFn<Closure>(cl);
 246 }
 247 
 248 template<typename IsAlive, typename F>
 249 class OopStorage::IfAliveFn {
 250 public:
 251   IfAliveFn(IsAlive* is_alive, F f) : _is_alive(is_alive), _f(f) {}
 252 
 253   bool operator()(oop* ptr) const {
 254     bool result = true;
 255     oop v = *ptr;
 256     if (v != NULL) {
 257       if (_is_alive->do_object_b(v)) {
 258         result = _f(ptr);
 259       } else {
 260         *ptr = NULL;            // Clear dead value.
 261       }
 262     }
 263     return result;
 264   }
 265 
 266 private:
 267   IsAlive* _is_alive;
 268   F _f;
 269 };
 270 
 271 template<typename IsAlive, typename F>
 272 inline OopStorage::IfAliveFn<IsAlive, F> OopStorage::if_alive_fn(IsAlive* is_alive, F f) {
 273   return IfAliveFn<IsAlive, F>(is_alive, f);
 274 }
 275 
 276 template<typename F>
 277 class OopStorage::SkipNullFn {
 278 public:
 279   SkipNullFn(F f) : _f(f) {}
 280 
 281   template<typename OopPtr>     // [const] oop*
 282   bool operator()(OopPtr ptr) const {
 283     return (*ptr != NULL) ? _f(ptr) : true;
 284   }
 285 
 286 private:
 287   F _f;
 288 };
 289 
 290 template<typename F>
 291 inline OopStorage::SkipNullFn<F> OopStorage::skip_null_fn(F f) {
 292   return SkipNullFn<F>(f);
 293 }
 294 
 295 // Inline Block accesses for use in iteration loops.
 296 
// Accessor for the block's embedded list entry.  Const is sufficient:
// the entry's links are mutable (see AllocationListEntry).
inline const OopStorage::AllocationListEntry& OopStorage::Block::allocation_list_entry() const {
  return _allocation_list_entry;
}
 300 
// Debug-only check that index refers to a slot within _data.
inline void OopStorage::Block::check_index(unsigned index) const {
  assert(index < ARRAY_SIZE(_data), "Index out of bounds: %u", index);
}
 304 
 305 inline oop* OopStorage::Block::get_pointer(unsigned index) {
 306   check_index(index);
 307   return &_data[index];
 308 }
 309 
 310 inline const oop* OopStorage::Block::get_pointer(unsigned index) const {
 311   check_index(index);
 312   return &_data[index];
 313 }
 314 
// Racy snapshot of the allocation bitmask (see the declaration's
// "Allocation bitmask accessors are racy" comment).
inline uintx OopStorage::Block::allocated_bitmask() const {
  return _allocated_bitmask;
}
 318 
 319 inline uintx OopStorage::Block::bitmask_for_index(unsigned index) const {
 320   check_index(index);
 321   return uintx(1) << index;
 322 }
 323 
 324 // Provide const or non-const iteration, depending on whether BlockPtr
 325 // is const Block* or Block*, respectively.
 326 template<typename F, typename BlockPtr> // BlockPtr := [const] Block*
 327 inline bool OopStorage::Block::iterate_impl(F f, BlockPtr block) {
 328   uintx bitmask = block->allocated_bitmask();
 329   while (bitmask != 0) {
 330     unsigned index = count_trailing_zeros(bitmask);
 331     bitmask ^= block->bitmask_for_index(index);
 332     if (!f(block->get_pointer(index))) {
 333       return false;
 334     }
 335   }
 336   return true;
 337 }
 338 
// Non-const iteration over allocated entries; returns false iff f did.
template<typename F>
inline bool OopStorage::Block::iterate(F f) {
  return iterate_impl(f, this);
}
 343 
// Const iteration over allocated entries; returns false iff f did.
template<typename F>
inline bool OopStorage::Block::iterate(F f) const {
  return iterate_impl(f, this);
}
 348 
 349 //////////////////////////////////////////////////////////////////////////////
 350 // Support for serial iteration, always at a safepoint.
 351 
 352 // Provide const or non-const iteration, depending on whether Storage is
 353 // const OopStorage* or OopStorage*, respectively.
 354 template<typename F, typename Storage> // Storage := [const] OopStorage
 355 inline bool OopStorage::iterate_impl(F f, Storage* storage) {
 356   assert_at_safepoint();
 357   // Propagate const/non-const iteration to the block layer, by using
 358   // const or non-const blocks as corresponding to Storage.
 359   typedef typename Conditional<IsConst<Storage>::value, const Block*, Block*>::type BlockPtr;
 360   ActiveArray* blocks = storage->_active_array;
 361   size_t limit = blocks->block_count();
 362   for (size_t i = 0; i < limit; ++i) {
 363     BlockPtr block = blocks->at(i);
 364     if (!block->iterate(f)) {
 365       return false;
 366     }
 367   }
 368   return true;
 369 }
 370 
// Serial non-const iteration over all allocated entries; safepoint only.
template<typename F>
inline bool OopStorage::iterate_safepoint(F f) {
  return iterate_impl(f, this);
}
 375 
// Serial const iteration over all allocated entries; safepoint only.
template<typename F>
inline bool OopStorage::iterate_safepoint(F f) const {
  return iterate_impl(f, this);
}
 380 
// Apply cl->do_oop to every allocated entry; safepoint only.
template<typename Closure>
inline void OopStorage::oops_do(Closure* cl) {
  iterate_safepoint(oop_fn(cl));
}
 385 
// Const variant: apply cl->do_oop to every allocated entry; safepoint only.
template<typename Closure>
inline void OopStorage::oops_do(Closure* cl) const {
  iterate_safepoint(oop_fn(cl));
}
 390 
// Apply cl->do_oop to every allocated non-NULL entry, skipping NULLs;
// safepoint only.
template<typename Closure>
inline void OopStorage::weak_oops_do(Closure* cl) {
  iterate_safepoint(skip_null_fn(oop_fn(cl)));
}
 395 
// Weak processing: for each allocated non-NULL entry, clear it if its
// referent is not alive per is_alive, else apply cl->do_oop.  Safepoint only.
template<typename IsAliveClosure, typename Closure>
inline void OopStorage::weak_oops_do(IsAliveClosure* is_alive, Closure* cl) {
  iterate_safepoint(if_alive_fn(is_alive, oop_fn(cl)));
}
 400 
 401 #endif // SHARE_GC_SHARED_OOPSTORAGE_INLINE_HPP