1 /*
   2  * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_G1_SPARSEPRT_HPP
  26 #define SHARE_VM_GC_G1_SPARSEPRT_HPP
  27 
  28 #include "gc/g1/g1CollectedHeap.hpp"
  29 #include "gc/g1/heapRegion.hpp"
  30 #include "gc/shared/cardTableBarrierSet.hpp"
  31 #include "memory/allocation.hpp"
  32 #include "runtime/mutex.hpp"
  33 #include "utilities/align.hpp"
  34 #include "utilities/globalDefinitions.hpp"
  35 
  36 // Sparse remembered set for a heap region (the "owning" region).  Maps
  37 // indices of other regions to short sequences of cards in the other region
  38 // that might contain pointers into the owner region.
  39 
  40 // These tables only expand while they are accessed in parallel --
// deletions may be done in single-threaded code.  This lets us permit
// unsynchronized reads/iterations, as long as expansions caused by
// insertions only enqueue old versions for deletion, but do not delete
// old versions synchronously.
  45 
// One entry of the sparse remembered set hash table: a short, fixed-capacity
// list of cards within a single ("from") region that may contain pointers
// into the owning region.
class SparsePRTEntry: public CHeapObj<mtGC> {
private:
  // The type of a card entry.
  typedef uint16_t card_elem_t;

  // We need to make sizeof(SparsePRTEntry) an even multiple of maximum member size,
  // in order to force correct alignment that could otherwise cause SIGBUS errors
  // when reading the member variables. This calculates the minimum number of card
  // array elements required to get that alignment.
  static const size_t card_array_alignment = sizeof(int) / sizeof(card_elem_t);

  // Index of the region whose cards this entry holds; a negative value
  // means the entry is unused (see valid_entry()).
  RegionIdx_t _region_ind;
  // Index of the next entry in the enclosing table's chain, terminated by
  // RSHashTable::NullEntry (-1).
  // NOTE(review): chain semantics inferred from RSHashTable's bucket/free
  // lists — confirm in the implementation file.
  int         _next_index;
  // Number of valid cards currently stored; equivalently, the index of the
  // first unused ("null") slot in _cards (see num_valid_cards()).
  int         _next_null;
  // The actual cards stored in this array.
  // WARNING: Don't put any data members beyond this line. Card array has, in fact, variable length.
  // It should always be the last data member.
  card_elem_t _cards[card_array_alignment];

  // Copy the current entry's cards into "cards".
  inline void copy_cards(card_elem_t* cards) const;
public:
  // Returns the size of the entry, used for entry allocation.
  // This is larger than sizeof(SparsePRTEntry) because the card array
  // extends past the end of the declared _cards member.
  static size_t size() { return sizeof(SparsePRTEntry) + sizeof(card_elem_t) * (cards_num() - card_array_alignment); }
  // Returns the size of the card array, i.e. G1RSetSparseRegionEntries
  // rounded up to the required alignment.
  static int cards_num() {
    return align_up((int)G1RSetSparseRegionEntries, (int)card_array_alignment);
  }

  // Set the region_ind to the given value, and delete all cards.
  inline void init(RegionIdx_t region_ind);

  RegionIdx_t r_ind() const { return _region_ind; }
  bool valid_entry() const { return r_ind() >= 0; }
  void set_r_ind(RegionIdx_t rind) { _region_ind = rind; }

  int next_index() const { return _next_index; }
  int* next_index_addr() { return &_next_index; }
  void set_next_index(int ni) { _next_index = ni; }

  // Returns "true" iff the entry contains the given card index.
  inline bool contains_card(CardIdx_t card_index) const;

  // Returns the number of non-NULL card entries.
  inline int num_valid_cards() const { return _next_null; }

  // Possible outcomes of add_card().
  enum AddCardResult {
    overflow,   // The entry is full; the card could not be added.
    found,      // The card was already present; nothing changed.
    added       // The card was newly added.
  };
  // Attempts to add the given card index to the entry; returns "added" on
  // success, "found" if the card was already present, or "overflow" if the
  // entry has no space left.
  inline AddCardResult add_card(CardIdx_t card_index);

  // Copy the current entry's cards into the "_cards" array of "e."
  inline void copy_cards(SparsePRTEntry* e) const;

  // Returns the i'th stored card; "i" must be within [0, cards_num()).
  inline CardIdx_t card(int i) const {
    assert(i >= 0, "must be nonnegative");
    assert(i < cards_num(), "range checking");
    return (CardIdx_t)_cards[i];
  }
};
 111 
// Open hash table mapping region indices to SparsePRTEntry instances.
// Entries live in a single contiguous allocation (_entries) and are linked
// into bucket chains and a free list by integer index rather than pointer.
class RSHashTable : public CHeapObj<mtGC> {

  friend class RSHashTableIter;


  // Inverse maximum hash table occupancy used.
  static float TableOccupancyFactor;

  // Number of SparsePRTEntry slots allocated in _entries.
  size_t _num_entries;

  // Number of hash buckets.
  size_t _capacity;
  // Mask applied to a hash value to obtain a bucket index.
  // NOTE(review): presumably _capacity - 1 with _capacity a power of two —
  // confirm in the constructor.
  size_t _capacity_mask;
  // Number of entry slots currently in use.
  size_t _occupied_entries;
  // Total number of cards stored across all occupied entries.
  size_t _occupied_cards;

  // The entry slots; index with entry(i), not pointer arithmetic on
  // SparsePRTEntry, because each slot is SparsePRTEntry::size() bytes.
  SparsePRTEntry* _entries;
  // Per-bucket heads of entry-index chains; NullEntry terminates a chain.
  int* _buckets;
  // Index of the first never-yet-allocated entry slot.
  // NOTE(review): meaning inferred from the name and alloc_entry() — confirm.
  int  _free_region;
  // Head of the chain of freed entry slots available for reuse.
  int  _free_list;

  // Requires that the caller hold a lock preventing parallel modifying
  // operations, and that the table be less than completely full.  If
  // an entry for "region_ind" is already in the table, finds it and
  // returns its address; otherwise allocates, initializes, inserts and
  // returns a new entry for "region_ind".
  SparsePRTEntry* entry_for_region_ind_create(RegionIdx_t region_ind);

  // Returns the index of the next free entry in "_entries".
  int alloc_entry();
  // Declares the entry "fi" to be free.  (It must have already been
  // deleted from any bucket lists.)
  void free_entry(int fi);

public:
  RSHashTable(size_t capacity);
  ~RSHashTable();

  // Sentinel index meaning "no entry" in bucket chains and the free list.
  static const int NullEntry = -1;

  // True when every entry slot is in use; the owner should move to a
  // larger table before further insertions.
  bool should_expand() const { return _occupied_entries == _num_entries; }

  // Attempts to ensure that the given card_index in the given region is in
  // the sparse table.  If successful (because the card was already
  // present, or because it was successfully added) returns "true".
  // Otherwise, returns "false" to indicate that the addition would
  // overflow the entry for the region.  The caller must transfer these
  // entries to a larger-capacity representation.
  bool add_card(RegionIdx_t region_id, CardIdx_t card_index);

  // Transfers the cards of the entry for "region_id" (if any) into "cards".
  // NOTE(review): exact copy-vs-move semantics and return meaning are not
  // visible here — confirm in the implementation file.
  bool get_cards(RegionIdx_t region_id, CardIdx_t* cards);

  // If there is an entry for "region_id", removes it and returns "true";
  // otherwise returns "false".
  bool delete_entry(RegionIdx_t region_id);

  // Returns "true" iff the table has an entry for "region_id" that
  // contains "card_index".
  bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const;

  // Adds the contents of entry "e" to this table (e.g. when transferring
  // entries to a larger table).
  // NOTE(review): whether "e" is copied or linked is not visible here —
  // confirm in the implementation file.
  void add_entry(SparsePRTEntry* e);

  // Returns the entry associated with "region_id", if any.
  SparsePRTEntry* get_entry(RegionIdx_t region_id) const;

  // Empties the table.
  void clear();

  size_t capacity() const      { return _capacity; }
  size_t capacity_mask() const { return _capacity_mask;  }
  size_t occupied_entries() const { return _occupied_entries; }
  size_t occupied_cards() const   { return _occupied_cards; }
  // Memory consumed by this table, in bytes.
  size_t mem_size() const;
  // The number of SparsePRTEntry instances available.
  size_t num_entries() const { return _num_entries; }

  // Returns the i'th entry slot.  Address arithmetic is done in bytes
  // because a slot occupies SparsePRTEntry::size() bytes, which exceeds
  // sizeof(SparsePRTEntry) (variable-length card array).
  SparsePRTEntry* entry(int i) const {
    assert(i >= 0 && (size_t)i < _num_entries, "precondition");
    return (SparsePRTEntry*)((char*)_entries + SparsePRTEntry::size() * i);
  }

  void print();
};
 188 
// Iterator over all cards stored in an RSHashTable.
// This is embedded in HRRS iterator.
class RSHashTableIter {
  // Return value indicating "invalid/no card".
  static const int NoCardFound = -1;

  int _tbl_ind;         // [-1, 0.._rsht->_capacity)
  int _bl_ind;          // [-1, 0.._rsht->_capacity)
  short _card_ind;      // [0..SparsePRTEntry::cards_num())
  RSHashTable* _rsht;   // The table being iterated over; not owned.

  // If the bucket list pointed to by _bl_ind contains a card, sets
  // _bl_ind to the index of the entry holding it and returns the card;
  // otherwise returns NoCardFound.
  CardIdx_t find_first_card_in_list();

  // Computes the proper card index for the card whose offset in the
  // current region (as indicated by _bl_ind) is "ci".
  // This is subject to errors when there is iteration concurrent with
  // modification, but these errors should be benign.
  size_t compute_card_ind(CardIdx_t ci);

public:
  RSHashTableIter(RSHashTable* rsht) :
    _tbl_ind(RSHashTable::NullEntry), // So that first increment gets to 0.
    _bl_ind(RSHashTable::NullEntry),
    _card_ind((SparsePRTEntry::cards_num() - 1)),
    _rsht(rsht) {}

  // If there is another card, stores its index in "card_index" and
  // returns "true"; otherwise returns "false".
  bool has_next(size_t& card_index);
};
 219 
 220 // Concurrent access to a SparsePRT must be serialized by some external mutex.
 221 
 222 class SparsePRTIter;
 223 class SparsePRTCleanupTask;
 224 
// The sparse remembered set of one heap region.  Wraps a pair of
// RSHashTable pointers so that readers can keep iterating the snapshot
// (_cur) while writers expand into a new table (_next).
class SparsePRT {
  friend class SparsePRTCleanupTask;

  //  Iterations are done on the _cur hash table, since they only need to
  //  see entries visible at the start of a collection pause.
  //  All other operations are done using the _next hash table.
  RSHashTable* _cur;
  RSHashTable* _next;

  // The heap region that owns this sparse remembered set.
  HeapRegion* _hr;

  enum SomeAdditionalPrivateConstants {
    // Capacity of a freshly (re)initialized hash table.
    InitialCapacity = 16
  };

  // Replaces _next with a larger-capacity table.
  // NOTE(review): per the file-head comment, the old table is not deleted
  // synchronously (readers may still be iterating _cur) — confirm the exact
  // protocol in the implementation file.
  void expand();

  // True once this table has been expanded (so _cur and _next may differ)
  // and a cleanup is pending.
  bool _expanded;

  bool expanded() { return _expanded; }
  void set_expanded(bool b) { _expanded = b; }

  // Link in the global singly-linked list of expanded tables.
  SparsePRT* _next_expanded;

  SparsePRT* next_expanded() { return _next_expanded; }
  void set_next_expanded(SparsePRT* nxt) { _next_expanded = nxt; }

  bool should_be_on_expanded_list();

  // Head of the global list of expanded tables awaiting cleanup.
  static SparsePRT* volatile _head_expanded_list;

public:
  SparsePRT(HeapRegion* hr);

  ~SparsePRT();

  // Number of cards currently stored (counted in the mutable _next table).
  size_t occupied() const { return _next->occupied_cards(); }
  // Memory consumed by this structure, in bytes.
  size_t mem_size() const;

  // Attempts to ensure that the given card_index in the given region is in
  // the sparse table.  If successful (because the card was already
  // present, or because it was successfully added) returns "true".
  // Otherwise, returns "false" to indicate that the addition would
  // overflow the entry for the region.  The caller must transfer these
  // entries to a larger-capacity representation.
  bool add_card(RegionIdx_t region_id, CardIdx_t card_index);

  // Return the pointer to the entry associated with the given region.
  SparsePRTEntry* get_entry(RegionIdx_t region_ind);

  // If there is an entry for "region_ind", removes it and returns "true";
  // otherwise returns "false."
  bool delete_entry(RegionIdx_t region_ind);

  // Clear the table, and reinitialize to initial capacity.
  void clear();

  // Ensure that "_cur" and "_next" point to the same table.
  void cleanup();

  // Clean up all tables on the expanded list.  Called single threaded.
  static void cleanup_all();
  // The table iterations should run against (see the comment on _cur).
  RSHashTable* cur() const { return _cur; }

  static void add_to_expanded_list(SparsePRT* sprt);
  static SparsePRT* get_from_expanded_list();

  // The purpose of these three methods is to help the GC workers
  // during the cleanup pause to recreate the expanded list, purging
  // any tables from it that belong to regions that are freed during
  // cleanup (if we don't purge those tables, there is a race that
  // causes various crashes; see CR 7014261).
  //
  // We chose to recreate the expanded list, instead of purging
  // entries from it by iterating over it, to avoid this serial phase
  // at the end of the cleanup pause.
  //
  // The three methods below work as follows:
  // * reset_for_cleanup_tasks() : Nulls the expanded list head at the
  //   start of the cleanup pause.
  // * do_cleanup_work() : Called by the cleanup workers for every
  //   region that is not free / is being freed by the cleanup
  //   pause. It creates a list of expanded tables whose head / tail
  //   are on the thread-local SparsePRTCleanupTask object.
  // * finish_cleanup_task() : Called by the cleanup workers after
  //   they complete their cleanup task. It adds the local list into
  //   the global expanded list. It assumes that the
  //   ParGCRareEvent_lock is being held to ensure MT-safety.
  static void reset_for_cleanup_tasks();
  void do_cleanup_work(SparsePRTCleanupTask* sprt_cleanup_task);
  static void finish_cleanup_task(SparsePRTCleanupTask* sprt_cleanup_task);

  // Returns "true" iff the mutable (_next) table contains the given card
  // in the given region.
  bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const {
    return _next->contains_card(region_id, card_index);
  }
};
 321 
// Iterator over a SparsePRT.  Runs against the snapshot table
// (sprt->cur()), which holds the entries visible at the start of the
// collection pause (see SparsePRT::_cur).
class SparsePRTIter: public RSHashTableIter {
public:
  SparsePRTIter(const SparsePRT* sprt) :
    RSHashTableIter(sprt->cur()) {}

  // If there is another card, stores its index in "card_index" and
  // returns "true"; otherwise returns "false".
  bool has_next(size_t& card_index) {
    return RSHashTableIter::has_next(card_index);
  }
};
 331 
// This allows each worker during a cleanup pause to create a
// thread-local list of sparse tables that have been expanded and need
// to be processed at the beginning of the next GC pause. These lists
// are concatenated into the single expanded list at the end of the
// cleanup pause.
// Per-worker accumulator for rebuilding the global expanded list during a
// cleanup pause (see SparsePRT::do_cleanup_work / finish_cleanup_task).
class SparsePRTCleanupTask {
private:
  // Head and tail of this worker's local list of expanded SparsePRTs,
  // linked through SparsePRT::_next_expanded; both NULL when empty.
  SparsePRT* _head;
  SparsePRT* _tail;

public:
  SparsePRTCleanupTask() : _head(NULL), _tail(NULL) { }

  // Appends "sprt" to this task's local list.
  void add(SparsePRT* sprt);
  SparsePRT* head() { return _head; }
  SparsePRT* tail() { return _tail; }
};
 349 
 350 #endif // SHARE_VM_GC_G1_SPARSEPRT_HPP