/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHARED_OOPSTORAGE_INLINE_HPP
#define SHARE_GC_SHARED_OOPSTORAGE_INLINE_HPP

#include "gc/shared/oopStorage.hpp"
#include "metaprogramming/conditional.hpp"
#include "metaprogramming/isConst.hpp"
#include "oops/oop.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/align.hpp"
#include "utilities/count_trailing_zeros.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

// Array of all active blocks.  Refcounted for lock-free reclaim of
// old array when a new array is allocated for expansion.
class OopStorage::ActiveArray {
  friend class OopStorage::TestAccess;

  size_t _size;
  volatile size_t _block_count;
  mutable volatile int _refcount;
  // Block* _blocks[1];            // Pseudo flexible array member.

  ActiveArray(size_t size);
  ~ActiveArray();

  // Noncopyable
  ActiveArray(const ActiveArray&);
  ActiveArray& operator=(const ActiveArray&);

  static size_t blocks_offset();
  Block* const* base_ptr() const;

  Block* const* block_ptr(size_t index) const;
  Block** block_ptr(size_t index);

public:
  static ActiveArray* create(size_t size, AllocFailType alloc_fail = AllocFailStrategy::EXIT_OOM);
  static void destroy(ActiveArray* ba);

  inline Block* at(size_t i) const;

  size_t size() const;
  size_t block_count() const;
  size_t block_count_acquire() const;
  void increment_refcount() const;
  bool decrement_refcount() const; // Return true if zero, otherwise false

  // Support for OopStorage::allocate.
  // Add block to the end of the array.  Updates block count at the
  // end of the operation, with a release_store. Returns true if the
  // block was added, false if there was no room available.
  // precondition: owner's _allocation_mutex is locked, or at safepoint.
  bool push(Block* block);

  // Support OopStorage::delete_empty_blocks_xxx operations.
  // Remove block from the array.
  // precondition: block must be present at its active_index element.
  void remove(Block* block);

  void copy_from(const ActiveArray* from);
};
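
// Illustrative sketch only (the actual pinning protocol lives in the
// OopStorage implementation): a lock-free reader can keep this array alive
// across a walk by holding a reference, roughly like this:
//
//   array->increment_refcount();               // pin against reclaim
//   size_t count = array->block_count_acquire();
//   for (size_t i = 0; i < count; ++i) {
//     do_something(array->at(i));              // hypothetical helper
//   }
//   if (array->decrement_refcount()) {
//     ActiveArray::destroy(array);             // last reference dropped
//   }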

inline size_t OopStorage::ActiveArray::blocks_offset() {
  return align_up(sizeof(ActiveArray), sizeof(Block*));
}
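
// The Block* entries are laid out immediately after the ActiveArray header,
// starting at blocks_offset() -- the "pseudo flexible array member" noted in
// the class above -- so base_ptr() below simply offsets from 'this'.  Sketch
// of the assumed layout (create() is expected to allocate room for both
// parts):
//
//   +--------------------------------+  <- this
//   | _size, _block_count, _refcount |
//   +--------------------------------+  <- this + blocks_offset()
//   | Block*[0] ... Block*[_size)    |
//   +--------------------------------+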

inline OopStorage::Block* const* OopStorage::ActiveArray::base_ptr() const {
  const void* ptr = reinterpret_cast<const char*>(this) + blocks_offset();
  return reinterpret_cast<Block* const*>(ptr);
}

inline OopStorage::Block* const* OopStorage::ActiveArray::block_ptr(size_t index) const {
  return base_ptr() + index;
}

inline OopStorage::Block** OopStorage::ActiveArray::block_ptr(size_t index) {
  return const_cast<Block**>(base_ptr() + index);
}

inline OopStorage::Block* OopStorage::ActiveArray::at(size_t index) const {
  assert(index < _block_count, "precondition");
  return *block_ptr(index);
}

// A Block has an embedded AllocationListEntry to provide the links between
// Blocks in an AllocationList.
class OopStorage::AllocationListEntry {
  friend class OopStorage::AllocationList;

  // Members are mutable, and we deal exclusively with pointers to
  // const, to make const blocks easier to use; a block being const
  // doesn't prevent modifying its list state.
  mutable const Block* _prev;
  mutable const Block* _next;

  // Noncopyable.
  AllocationListEntry(const AllocationListEntry&);
  AllocationListEntry& operator=(const AllocationListEntry&);

public:
  AllocationListEntry();
  ~AllocationListEntry();
};

// Fixed-sized array of oops, plus bookkeeping data.
// All blocks are in the storage's _active_array, at the block's _active_index.
// Non-full blocks are in the storage's _allocation_list, linked through the
// block's _allocation_list_entry.  Empty blocks are at the end of that list.
class OopStorage::Block /* No base class, to avoid messing up alignment. */ {
  // _data must be the first non-static data member, for alignment.
  oop _data[BitsPerWord];
  static const unsigned _data_pos = 0; // Position of _data.

  volatile uintx _allocated_bitmask; // One bit per _data element.
  const OopStorage* _owner;
  void* _memory;              // Unaligned storage containing block.
  size_t _active_index;
  AllocationListEntry _allocation_list_entry;
  Block* volatile _deferred_updates_next;
  volatile uintx _release_refcount;

  Block(const OopStorage* owner, void* memory);
  ~Block();

  void check_index(unsigned index) const;
  unsigned get_index(const oop* ptr) const;

  template<typename F, typename BlockPtr>
  static bool iterate_impl(F f, BlockPtr b);

  // Noncopyable.
  Block(const Block&);
  Block& operator=(const Block&);

public:
  const AllocationListEntry& allocation_list_entry() const;

  static size_t allocation_size();
  static size_t allocation_alignment_shift();

  oop* get_pointer(unsigned index);
  const oop* get_pointer(unsigned index) const;

  uintx bitmask_for_index(unsigned index) const;
  uintx bitmask_for_entry(const oop* ptr) const;

  // Allocation bitmask accessors are racy.
  bool is_full() const;
  bool is_empty() const;
  uintx allocated_bitmask() const;
  bool is_deletable() const;

  Block* deferred_updates_next() const;
  void set_deferred_updates_next(Block* new_next);

  bool contains(const oop* ptr) const;

  size_t active_index() const;
  void set_active_index(size_t index);
  static size_t active_index_safe(const Block* block); // Returns 0 if access fails.

  // Returns NULL if ptr is not in a block or not allocated in that block.
  static Block* block_for_ptr(const OopStorage* owner, const oop* ptr);

  oop* allocate();
  static Block* new_block(const OopStorage* owner);
  static void delete_block(const Block& block);

  void release_entries(uintx releasing, Block* volatile* deferred_list);

  template<typename F> bool iterate(F f);
  template<typename F> bool iterate(F f) const;
}; // class Block
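
// Illustrative note (a sketch, not a contract): slot 'i' of _data is tracked
// by bit (uintx(1) << i) of _allocated_bitmask, as returned by
// bitmask_for_index().  For example, a bitmask of 0x5 means entries 0 and 2
// are allocated; such a block is neither full nor empty, and a successful
// allocate() would claim one of the currently clear bits.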

inline OopStorage::Block* OopStorage::AllocationList::head() {
  return const_cast<Block*>(_head);
}

inline OopStorage::Block* OopStorage::AllocationList::tail() {
  return const_cast<Block*>(_tail);
}

inline const OopStorage::Block* OopStorage::AllocationList::chead() const {
  return _head;
}

inline const OopStorage::Block* OopStorage::AllocationList::ctail() const {
  return _tail;
}

inline OopStorage::Block* OopStorage::AllocationList::prev(Block& block) {
  return const_cast<Block*>(block.allocation_list_entry()._prev);
}

inline OopStorage::Block* OopStorage::AllocationList::next(Block& block) {
  return const_cast<Block*>(block.allocation_list_entry()._next);
}

inline const OopStorage::Block* OopStorage::AllocationList::prev(const Block& block) const {
  return block.allocation_list_entry()._prev;
}

inline const OopStorage::Block* OopStorage::AllocationList::next(const Block& block) const {
  return block.allocation_list_entry()._next;
}

template<typename Closure>
class OopStorage::OopFn {
public:
  explicit OopFn(Closure* cl) : _cl(cl) {}

  template<typename OopPtr>     // [const] oop*
  bool operator()(OopPtr ptr) const {
    _cl->do_oop(ptr);
    return true;
  }

private:
  Closure* _cl;
};

template<typename Closure>
inline OopStorage::OopFn<Closure> OopStorage::oop_fn(Closure* cl) {
  return OopFn<Closure>(cl);
}

template<typename IsAlive, typename F>
class OopStorage::IfAliveFn {
public:
  IfAliveFn(IsAlive* is_alive, F f) : _is_alive(is_alive), _f(f) {}

  bool operator()(oop* ptr) const {
    bool result = true;
    oop v = *ptr;
    if (v != NULL) {
      if (_is_alive->do_object_b(v)) {
        result = _f(ptr);
      } else {
        *ptr = NULL;            // Clear dead value.
      }
    }
    return result;
  }

private:
  IsAlive* _is_alive;
  F _f;
};

template<typename IsAlive, typename F>
inline OopStorage::IfAliveFn<IsAlive, F> OopStorage::if_alive_fn(IsAlive* is_alive, F f) {
  return IfAliveFn<IsAlive, F>(is_alive, f);
}

template<typename F>
class OopStorage::SkipNullFn {
public:
  SkipNullFn(F f) : _f(f) {}

  template<typename OopPtr>     // [const] oop*
  bool operator()(OopPtr ptr) const {
    return (*ptr != NULL) ? _f(ptr) : true;
  }

private:
  F _f;
};

template<typename F>
inline OopStorage::SkipNullFn<F> OopStorage::skip_null_fn(F f) {
  return SkipNullFn<F>(f);
}

// Inline Block accesses for use in iteration loops.

inline const OopStorage::AllocationListEntry& OopStorage::Block::allocation_list_entry() const {
  return _allocation_list_entry;
}

inline void OopStorage::Block::check_index(unsigned index) const {
  assert(index < ARRAY_SIZE(_data), "Index out of bounds: %u", index);
}

inline oop* OopStorage::Block::get_pointer(unsigned index) {
  check_index(index);
  return &_data[index];
}

inline const oop* OopStorage::Block::get_pointer(unsigned index) const {
  check_index(index);
  return &_data[index];
}

inline uintx OopStorage::Block::allocated_bitmask() const {
  return _allocated_bitmask;
}

inline uintx OopStorage::Block::bitmask_for_index(unsigned index) const {
  check_index(index);
  return uintx(1) << index;
}

// Provide const or non-const iteration, depending on whether BlockPtr
// is const Block* or Block*, respectively.
template<typename F, typename BlockPtr> // BlockPtr := [const] Block*
inline bool OopStorage::Block::iterate_impl(F f, BlockPtr block) {
  uintx bitmask = block->allocated_bitmask();
  while (bitmask != 0) {
    unsigned index = count_trailing_zeros(bitmask);
    bitmask ^= block->bitmask_for_index(index);
    if (!f(block->get_pointer(index))) {
      return false;
    }
  }
  return true;
}

template<typename F>
inline bool OopStorage::Block::iterate(F f) {
  return iterate_impl(f, this);
}

template<typename F>
inline bool OopStorage::Block::iterate(F f) const {
  return iterate_impl(f, this);
}
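
// Usage sketch (the functor below is hypothetical, not part of this file):
// iterate() applies a function object to a pointer to each allocated entry,
// and stops early if the function object returns false.  Because the functor
// is taken by value, this sketch writes its result through a pointer:
//
//   struct CountNonNull {
//     size_t* _count;
//     CountNonNull(size_t* count) : _count(count) {}
//     bool operator()(const oop* ptr) const {
//       if (*ptr != NULL) ++(*_count);
//       return true;              // keep iterating; false would stop early
//     }
//   };
//   ...
//   size_t count = 0;
//   block->iterate(CountNonNull(&count));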

//////////////////////////////////////////////////////////////////////////////
// Support for serial iteration, always at a safepoint.

// Provide const or non-const iteration, depending on whether Storage is
// const OopStorage* or OopStorage*, respectively.
template<typename F, typename Storage> // Storage := [const] OopStorage
inline bool OopStorage::iterate_impl(F f, Storage* storage) {
  assert_at_safepoint();
  // Propagate const/non-const iteration to the block layer, by using
  // const or non-const blocks as corresponding to Storage.
  typedef typename Conditional<IsConst<Storage>::value, const Block*, Block*>::type BlockPtr;
  ActiveArray* blocks = storage->_active_array;
  size_t limit = blocks->block_count();
  for (size_t i = 0; i < limit; ++i) {
    BlockPtr block = blocks->at(i);
    if (!block->iterate(f)) {
      return false;
    }
  }
  return true;
}

template<typename F>
inline bool OopStorage::iterate_safepoint(F f) {
  return iterate_impl(f, this);
}

template<typename F>
inline bool OopStorage::iterate_safepoint(F f) const {
  return iterate_impl(f, this);
}

template<typename Closure>
inline void OopStorage::oops_do(Closure* cl) {
  iterate_safepoint(oop_fn(cl));
}

template<typename Closure>
inline void OopStorage::oops_do(Closure* cl) const {
  iterate_safepoint(oop_fn(cl));
}

template<typename Closure>
inline void OopStorage::weak_oops_do(Closure* cl) {
  iterate_safepoint(skip_null_fn(oop_fn(cl)));
}

template<typename IsAliveClosure, typename Closure>
inline void OopStorage::weak_oops_do(IsAliveClosure* is_alive, Closure* cl) {
  iterate_safepoint(if_alive_fn(is_alive, oop_fn(cl)));
}
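
// Usage sketch (the closure below is hypothetical): callers normally pass an
// OopClosure, which oop_fn() adapts to the function-object protocol used by
// iterate_safepoint().  All of these iterations assert that they run at a
// safepoint.
//
//   class MyRootsClosure : public OopClosure {
//   public:
//     virtual void do_oop(oop* p)       { /* process the root at p */ }
//     virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
//   };
//   ...
//   MyRootsClosure cl;
//   storage->oops_do(&cl);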

#endif // SHARE_GC_SHARED_OOPSTORAGE_INLINE_HPP