/*
 * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHARED_OOPSTORAGE_HPP
#define SHARE_GC_SHARED_OOPSTORAGE_HPP

#include "memory/allocation.hpp"
#include "oops/oop.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include "utilities/singleWriterSynchronizer.hpp"

class Mutex;
class outputStream;

// OopStorage supports management of off-heap references to objects allocated
// in the Java heap.  An OopStorage object provides a set of Java object
// references (oop values), which clients refer to via oop* handles to the
// associated OopStorage entries.  Clients allocate entries to create a
// (possibly weak) reference to a Java object, use that reference, and release
// the reference when no longer needed.
//
// The garbage collector must know about all OopStorage objects and their
// reference strength.  OopStorage provides the garbage collector with support
// for iteration over all the allocated entries.
//
// There are several categories of interaction with an OopStorage object.
//
// (1) allocation and release of entries, by the mutator or the VM.
// (2) iteration by the garbage collector, possibly concurrent with mutator.
// (3) iteration by other, non-GC, tools (only at safepoints).
// (4) cleanup of unused internal storage, possibly concurrent with mutator.
//
// A goal of OopStorage is to make these interactions thread-safe, while
// minimizing potential lock contention issues within and between these
// categories.  In particular, support for concurrent iteration by the garbage
// collector, under certain restrictions, is required.  Further, it must
// neither block nor be blocked by other operations for long periods.
//
// Internally, OopStorage is a set of Block objects, from which entries are
// allocated and released.  A block contains an oop[] and a bitmask indicating
// which entries are in use (have been allocated and not yet released).  New
// blocks are constructed and added to the storage object when an entry
// allocation request is made and there are no blocks with unused entries.
// Blocks may be removed and deleted when empty.
//
// There are two important (and somewhat intertwined) protocols governing
// concurrent access to a storage object.  These are the Concurrent Iteration
// Protocol and the Allocation Protocol.  See the ParState class for a
// discussion of concurrent iteration and the management of thread
// interactions for this protocol.  Similarly, see the allocate() function for
// a discussion of allocation.
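//
// Example usage (an illustrative sketch, not part of this interface).  The
// storage pointer "my_storage" and the value "obj" are hypothetical; real
// clients typically also route their oop loads and stores through the
// appropriate GC access barriers.
//
//   oop* slot = my_storage->allocate();   // NULL if memory allocation failed
//   if (slot != NULL) {
//     *slot = obj;                        // new entries start out NULL
//     // ... use *slot as an off-heap reference to obj ...
//     *slot = NULL;                       // required before release
//     my_storage->release(slot);
//   }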

class OopStorage : public CHeapObj<mtGC> {
public:
  OopStorage(const char* name, Mutex* allocation_mutex, Mutex* active_mutex);
  ~OopStorage();

  // These count and usage accessors are racy unless at a safepoint.

  // The number of allocated and not yet released entries.
  size_t allocation_count() const;

  // The number of blocks of entries.  Useful for sizing parallel iteration.
  size_t block_count() const;

  // Total number of blocks * memory allocation per block, plus
  // bookkeeping overhead, including this storage object.
  size_t total_memory_usage() const;

  enum EntryStatus {
    INVALID_ENTRY,
    UNALLOCATED_ENTRY,
    ALLOCATED_ENTRY
  };

  // Locks _allocation_mutex.
  // precondition: ptr != NULL.
  EntryStatus allocation_status(const oop* ptr) const;

  // Allocates and returns a new entry.  Returns NULL if memory allocation
  // failed.  Locks _allocation_mutex.
  // postcondition: *result == NULL.
  oop* allocate();

  // Deallocates ptr.  No locking.
  // precondition: ptr is a valid allocated entry.
  // precondition: *ptr == NULL.
  void release(const oop* ptr);

  // Releases all the ptrs.  Possibly faster than individual calls to
  // release(oop*).  Best if ptrs is sorted by address.  No locking.
  // precondition: All elements of ptrs are valid allocated entries.
  // precondition: *ptrs[i] == NULL, for i in [0,size).
  void release(const oop* const* ptrs, size_t size);
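  //
  // Example (illustrative sketch): bulk-releasing a batch of handles.  The
  // names "my_storage", "handles" (an oop**), and "count" are hypothetical.
  // Entries are cleared first to satisfy the precondition; sorting handles
  // by address is an optional performance improvement.
  //
  //   for (size_t i = 0; i < count; ++i) {
  //     *handles[i] = NULL;
  //   }
  //   my_storage->release(handles, count);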

  // Applies f to each allocated entry's location.  f must be a function or
  // function object.  Assume p is either a const oop* or an oop*, depending
  // on whether the associated storage is const or non-const, respectively.
  // Then f(p) must be a valid expression.  The result of invoking f(p) must
  // be implicitly convertible to bool.  Iteration terminates and returns
  // false if any invocation of f returns false.  Otherwise, the result of
  // iteration is true.
  // precondition: at safepoint.
  template<typename F> inline bool iterate_safepoint(F f);
  template<typename F> inline bool iterate_safepoint(F f) const;
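  //
  // Example (illustrative sketch): counting non-NULL entries with a function
  // object.  "CountNonNull" and "storage" are hypothetical; the count is held
  // by pointer so that pass-by-value copies of the functor update the same
  // counter.
  //
  //   struct CountNonNull {
  //     size_t* _count;
  //     explicit CountNonNull(size_t* count) : _count(count) {}
  //     bool operator()(const oop* p) {
  //       if (*p != NULL) ++(*_count);
  //       return true;                    // true means continue iterating
  //     }
  //   };
  //
  //   size_t count = 0;
  //   storage.iterate_safepoint(CountNonNull(&count));  // at a safepoint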

  // oops_do and weak_oops_do are wrappers around iterate_safepoint, providing
  // an adaptation layer allowing the use of existing is-alive closures and
  // OopClosures.  Assume p is either const oop* or oop*, depending on whether
  // the associated storage is const or non-const, respectively.  Then
  //
  // - closure->do_oop(p) must be a valid expression whose value is ignored.
  //
  // - is_alive->do_object_b(*p) must be a valid expression whose value is
  // convertible to bool.
  //
  // For weak_oops_do, if *p == NULL then neither is_alive nor closure will be
  // invoked for p.  If is_alive->do_object_b(*p) is false, then closure will
  // not be invoked on p, and *p will be set to NULL.

  template<typename Closure> inline void oops_do(Closure* closure);
  template<typename Closure> inline void oops_do(Closure* closure) const;
  template<typename Closure> inline void weak_oops_do(Closure* closure);

  template<typename IsAliveClosure, typename Closure>
  inline void weak_oops_do(IsAliveClosure* is_alive, Closure* closure);
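  //
  // Example (illustrative sketch): GC-style processing of a weak storage.
  // "weak_storage", "is_alive" (a BoolObjectClosure*), and "keep_alive" (an
  // OopClosure*) are hypothetical names supplied by the caller.
  //
  //   weak_storage.weak_oops_do(is_alive, keep_alive);
  //   // NULL entries are skipped.  For a non-NULL entry p, keep_alive is
  //   // applied to p if is_alive->do_object_b(*p) is true; otherwise *p is
  //   // cleared to NULL and keep_alive is not invoked.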

  // Parallel iteration is for the exclusive use of the GC.
  // Other clients must use serial iteration.
  template<bool concurrent, bool is_const> class ParState;

  // Service thread cleanup support.

  // Called by the service thread to process any pending cleanups for this
  // storage object.  Drains the _deferred_updates list, and deletes empty
  // blocks.  Stops deleting if there is an in-progress concurrent
  // iteration.  Locks both the _allocation_mutex and the _active_mutex, and
  // may safepoint.  Deletion may be throttled, with only some available
  // work performed, in order to allow other Service thread subtasks to run.
  // Returns true if there may be more work to do, false if nothing to do.
  bool delete_empty_blocks();

  // Called by safepoint cleanup to notify the service thread (via
  // Service_lock) that there may be some OopStorage objects with pending
  // cleanups to process.
  static void trigger_cleanup_if_needed();

  // Called by the service thread (while holding Service_lock) to test for
  // pending cleanup requests, resetting the request state to allow
  // recognition of new requests.  Returns true if there was a pending
  // request.
  static bool has_cleanup_work_and_reset();

  // Debugging and logging support.
  const char* name() const;
  void print_on(outputStream* st) const PRODUCT_RETURN;

  // Provides access to storage internals, for unit testing.
  // Declares, but does not define, the public class OopStorage::TestAccess.
  // That class is defined as part of the unit test.  It "exports" the needed
  // private types by providing public typedefs for them.
  class TestAccess;

private:
  class Block;                  // Fixed-size array of oops, plus bookkeeping.
  class ActiveArray;            // Array of Blocks, plus bookkeeping.
  class AllocationListEntry;    // Provides AllocationList links in a Block.

  // Doubly-linked list of Blocks.
  class AllocationList {
    const Block* _head;
    const Block* _tail;

    // Noncopyable.
    AllocationList(const AllocationList&);
    AllocationList& operator=(const AllocationList&);

  public:
    AllocationList();
    ~AllocationList();

    Block* head();
    Block* tail();
    const Block* chead() const;
    const Block* ctail() const;

    Block* prev(Block& block);
    Block* next(Block& block);

    const Block* prev(const Block& block) const;
    const Block* next(const Block& block) const;

    void push_front(const Block& block);
    void push_back(const Block& block);
    void unlink(const Block& block);
  };

private:
  const char* _name;
  ActiveArray* _active_array;
  AllocationList _allocation_list;
  Block* volatile _deferred_updates;
  Mutex* _allocation_mutex;
  Mutex* _active_mutex;

  // Volatile for racy unlocked accesses.
  volatile size_t _allocation_count;

  // Protection for _active_array.
  mutable SingleWriterSynchronizer _protect_active;

  // mutable because this gets set even for const iteration.
  mutable int _concurrent_iteration_count;

  volatile bool _needs_cleanup;

  bool try_add_block();
  Block* block_for_allocation();

  Block* find_block_or_null(const oop* ptr) const;
  void delete_empty_block(const Block& block);
  bool reduce_deferred_updates();
  void record_needs_cleanup();

  // Managing _active_array.
  bool expand_active_array();
  void replace_active_array(ActiveArray* new_array);
  ActiveArray* obtain_active_array() const;
  void relinquish_block_array(ActiveArray* array) const;
  class WithActiveArray;        // RAII helper for active array access.

  template<typename F, typename Storage>
  static bool iterate_impl(F f, Storage* storage);

  // Implementation support for parallel iteration
  class BasicParState;

  // Wrapper for OopClosure-style function, so it can be used with
  // iterate.  Assume p is of type oop*.  Then cl->do_oop(p) must be a
  // valid expression whose value may be ignored.
  template<typename Closure> class OopFn;
  template<typename Closure> static OopFn<Closure> oop_fn(Closure* cl);

  // Wrapper for BoolObjectClosure + iteration handler pair, so they
  // can be used with iterate.
  template<typename IsAlive, typename F> class IfAliveFn;
  template<typename IsAlive, typename F>
  static IfAliveFn<IsAlive, F> if_alive_fn(IsAlive* is_alive, F f);

  // Wrapper for iteration handler, automatically skipping NULL entries.
  template<typename F> class SkipNullFn;
  template<typename F> static SkipNullFn<F> skip_null_fn(F f);
};

#endif // SHARE_GC_SHARED_OOPSTORAGE_HPP