1 /*
   2  * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_G1_SPARSEPRT_HPP
  26 #define SHARE_VM_GC_G1_SPARSEPRT_HPP
  27 
  28 #include "gc/g1/g1CollectedHeap.hpp"
  29 #include "gc/g1/heapRegion.hpp"
  30 #include "gc/shared/cardTableModRefBS.hpp"
  31 #include "memory/allocation.hpp"
  32 #include "runtime/mutex.hpp"
  33 #include "utilities/globalDefinitions.hpp"
  34 
  35 // Sparse remembered set for a heap region (the "owning" region).  Maps
  36 // indices of other regions to short sequences of cards in the other region
  37 // that might contain pointers into the owner region.
  38 
// These tables only expand while they are accessed in parallel --
// deletions may be done in single-threaded code.  This allows us to
// permit unsynchronized reads/iterations, as long as expansions caused
// by insertions only enqueue old versions for deletion, but do not
// delete old versions synchronously.
  44 
class SparsePRTEntry: public CHeapObj<mtGC> {
private:
  // The type of a card entry.
  typedef uint16_t card_elem_t;

  // We need to make sizeof(SparsePRTEntry) an even multiple of maximum member size,
  // in order to force correct alignment that could otherwise cause SIGBUS errors
  // when reading the member variables. This calculates the minimum number of card
  // array elements required to get that alignment.
  static const size_t card_array_alignment = sizeof(int) / sizeof(card_elem_t);

  // Index of the region whose cards this entry records.
  RegionIdx_t _region_ind;
  // Link to the next entry in a bucket chain or free list of the
  // enclosing RSHashTable (RSHashTable::NullEntry-terminated).
  int         _next_index;
  // Number of valid cards currently stored; equivalently, the index of
  // the first unused slot in _cards (see num_valid_cards()).
  int         _next_null;
  // The actual cards stored in this array.
  // WARNING: Don't put any data members beyond this line. Card array has, in fact, variable length.
  // It should always be the last data member.
  card_elem_t _cards[card_array_alignment];

  // Copy the current entry's cards into "cards".
  inline void copy_cards(card_elem_t* cards) const;
public:
  // Returns the size of the entry, used for entry allocation.
  // The array really holds cards_num() elements, so the allocation adds
  // the elements beyond the card_array_alignment declared inline above.
  static size_t size() { return sizeof(SparsePRTEntry) + sizeof(card_elem_t) * (cards_num() - card_array_alignment); }
  // Returns the size of the card array.
  static int cards_num() {
    return align_up(G1RSetSparseRegionEntries, card_array_alignment);
  }

  // Set the region_ind to the given value, and delete all cards.
  inline void init(RegionIdx_t region_ind);

  RegionIdx_t r_ind() const { return _region_ind; }
  // An entry is valid iff its region index is non-negative.
  bool valid_entry() const { return r_ind() >= 0; }
  void set_r_ind(RegionIdx_t rind) { _region_ind = rind; }

  // Accessors for the bucket-chain / free-list link.
  int next_index() const { return _next_index; }
  int* next_index_addr() { return &_next_index; }
  void set_next_index(int ni) { _next_index = ni; }

  // Returns "true" iff the entry contains the given card index.
  inline bool contains_card(CardIdx_t card_index) const;

  // Returns the number of non-NULL card entries.
  inline int num_valid_cards() const { return _next_null; }

  // Possible outcomes of add_card().
  enum AddCardResult {
    overflow,  // The entry is full; the card could not be added.
    found,     // The card was already present.
    added      // The card was added.
  };
  // Attempts to add the given card index to the entry; the result
  // indicates whether it was added, already present, or did not fit.
  inline AddCardResult add_card(CardIdx_t card_index);

  // Copy the current entry's cards into the "_card" array of "e."
  inline void copy_cards(SparsePRTEntry* e) const;

  // Returns the i-th stored card, range-checked against cards_num().
  inline CardIdx_t card(int i) const {
    assert(i >= 0, "must be nonnegative");
    assert(i < cards_num(), "range checking");
    return (CardIdx_t)_cards[i];
  }
};
 110 
class RSHashTable : public CHeapObj<mtGC> {

  friend class RSHashTableIter;


  // Inverse maximum hash table occupancy used.
  static float TableOccupancyFactor;

  // The number of SparsePRTEntry instances that _entries can hold.
  size_t _num_entries;

  // Size of the bucket array. _capacity_mask is presumably
  // _capacity - 1 (power-of-two capacity) -- verify in the constructor.
  size_t _capacity;
  size_t _capacity_mask;
  // Number of entries currently in use, and the total number of cards
  // those entries contain.
  size_t _occupied_entries;
  size_t _occupied_cards;

  // Storage for the (variable-sized) entries; see SparsePRTEntry::size().
  SparsePRTEntry* _entries;
  // Bucket array of head indices into _entries (NullEntry when empty).
  int* _buckets;
  // Free-entry bookkeeping: _free_region is presumably the index of the
  // next never-used entry and _free_list the head of the recycled-entry
  // list -- verify against alloc_entry()/free_entry() in sparsePRT.cpp.
  int  _free_region;
  int  _free_list;

  // Requires that the caller hold a lock preventing parallel modifying
  // operations, and that the table be less than completely full.  If
  // an entry for "region_ind" is already in the table, finds it and
  // returns its address; otherwise allocates, initializes, inserts and
  // returns a new entry for "region_ind".
  SparsePRTEntry* entry_for_region_ind_create(RegionIdx_t region_ind);

  // Returns the index of the next free entry in "_entries".
  int alloc_entry();
  // Declares the entry "fi" to be free.  (It must have already been
  // deleted from any bucket lists.)
  void free_entry(int fi);

public:
  RSHashTable(size_t capacity);
  ~RSHashTable();

  // Sentinel index meaning "no entry" in bucket and free lists.
  static const int NullEntry = -1;

  // Returns "true" iff every available entry is in use, so the table
  // must be expanded before another region entry can be added.
  bool should_expand() const { return _occupied_entries == _num_entries; }

  // Attempts to ensure that the given card_index in the given region is in
  // the sparse table.  If successful (because the card was already
  // present, or because it was successfully added) returns "true".
  // Otherwise, returns "false" to indicate that the addition would
  // overflow the entry for the region.  The caller must transfer these
  // entries to a larger-capacity representation.
  bool add_card(RegionIdx_t region_id, CardIdx_t card_index);

  // Copies the cards recorded for "region_id" into "cards"; the return
  // value presumably indicates whether an entry was found -- verify in
  // sparsePRT.cpp.
  bool get_cards(RegionIdx_t region_id, CardIdx_t* cards);

  // If there is an entry for "region_id", removes it and returns "true";
  // otherwise returns "false".
  bool delete_entry(RegionIdx_t region_id);

  // Returns "true" iff the table has an entry for "region_id" containing
  // "card_index".
  bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const;

  // Inserts the given entry into this table.
  void add_entry(SparsePRTEntry* e);

  // Returns the entry associated with "region_id", if any.
  SparsePRTEntry* get_entry(RegionIdx_t region_id) const;

  // Removes all entries from the table.
  void clear();

  size_t capacity() const      { return _capacity; }
  size_t capacity_mask() const { return _capacity_mask;  }
  size_t occupied_entries() const { return _occupied_entries; }
  size_t occupied_cards() const   { return _occupied_cards; }
  // Memory footprint of this table, in bytes.
  size_t mem_size() const;
  // The number of SparsePRTEntry instances available.
  size_t num_entries() const { return _num_entries; }

  // Returns the i-th entry. Entries are variable-sized, hence the
  // explicit byte arithmetic instead of plain array indexing.
  SparsePRTEntry* entry(int i) const {
    assert(i >= 0 && (size_t)i < _num_entries, "precondition");
    return (SparsePRTEntry*)((char*)_entries + SparsePRTEntry::size() * i);
  }

  void print();
};
 187 
// ValueObj because it will be embedded in the HRRS iterator.
class RSHashTableIter VALUE_OBJ_CLASS_SPEC {
  // Return value indicating "invalid/no card".
  static const int NoCardFound = -1;

  int _tbl_ind;         // [-1, 0.._rsht->_capacity)
  int _bl_ind;          // [-1, 0.._rsht->_capacity)
  short _card_ind;      // [0..SparsePRTEntry::cards_num())
  RSHashTable* _rsht;   // The table being iterated over.

  // If the bucket list pointed to by _bl_ind contains a card, sets
  // _bl_ind to the index of that entry.
  // Returns the card found, if any; otherwise returns NoCardFound.
  CardIdx_t find_first_card_in_list();

  // Computes the proper card index for the card whose offset in the
  // current region (as indicated by _bl_ind) is "ci".
  // This is subject to errors when there is iteration concurrent with
  // modification, but these errors should be benign.
  size_t compute_card_ind(CardIdx_t ci);

public:
  RSHashTableIter(RSHashTable* rsht) :
    _tbl_ind(RSHashTable::NullEntry), // So that first increment gets to 0.
    _bl_ind(RSHashTable::NullEntry),
    _card_ind((SparsePRTEntry::cards_num() - 1)),
    _rsht(rsht) {}

  // If there is another card, stores its index in "card_index" and
  // returns "true"; otherwise returns "false".
  bool has_next(size_t& card_index);
};
 218 
 219 // Concurrent access to a SparsePRT must be serialized by some external mutex.
 220 
 221 class SparsePRTIter;
 222 class SparsePRTCleanupTask;
 223 
class SparsePRT VALUE_OBJ_CLASS_SPEC {
  friend class SparsePRTCleanupTask;

  //  Iterations are done on the _cur hash table, since they only need to
  //  see entries visible at the start of a collection pause.
  //  All other operations are done using the _next hash table.
  RSHashTable* _cur;
  RSHashTable* _next;

  // The heap region owning this sparse remembered set.
  HeapRegion* _hr;

  enum SomeAdditionalPrivateConstants {
    // Capacity of the hash tables on construction and after clear().
    InitialCapacity = 16
  };

  // Replaces "_next" with a larger-capacity table. Per the comment at
  // the top of this file, old table versions are enqueued for later
  // deletion rather than freed synchronously.
  void expand();

  // Whether this table has been expanded and thus needs cleanup.
  bool _expanded;

  bool expanded() { return _expanded; }
  void set_expanded(bool b) { _expanded = b; }

  // Link chaining expanded tables on the global expanded list.
  SparsePRT* _next_expanded;

  SparsePRT* next_expanded() { return _next_expanded; }
  void set_next_expanded(SparsePRT* nxt) { _next_expanded = nxt; }

  bool should_be_on_expanded_list();

  // Head of the global list of expanded tables awaiting cleanup.
  static SparsePRT* volatile _head_expanded_list;

public:
  SparsePRT(HeapRegion* hr);

  ~SparsePRT();

  // Number of cards recorded in the current ("_next") table.
  size_t occupied() const { return _next->occupied_cards(); }
  size_t mem_size() const;

  // Attempts to ensure that the given card_index in the given region is in
  // the sparse table.  If successful (because the card was already
  // present, or because it was successfully added) returns "true".
  // Otherwise, returns "false" to indicate that the addition would
  // overflow the entry for the region.  The caller must transfer these
  // entries to a larger-capacity representation.
  bool add_card(RegionIdx_t region_id, CardIdx_t card_index);

  // Return the pointer to the entry associated with the given region.
  SparsePRTEntry* get_entry(RegionIdx_t region_ind);

  // If there is an entry for "region_ind", removes it and returns "true";
  // otherwise returns "false."
  bool delete_entry(RegionIdx_t region_ind);

  // Clear the table, and reinitialize to initial capacity.
  void clear();

  // Ensure that "_cur" and "_next" point to the same table.
  void cleanup();

  // Clean up all tables on the expanded list.  Called single threaded.
  static void cleanup_all();
  RSHashTable* cur() const { return _cur; }

  // Push/pop operations on the global expanded list.
  static void add_to_expanded_list(SparsePRT* sprt);
  static SparsePRT* get_from_expanded_list();

  // The purpose of these three methods is to help the GC workers
  // during the cleanup pause to recreate the expanded list, purging
  // any tables from it that belong to regions that are freed during
  // cleanup (if we don't purge those tables, there is a race that
  // causes various crashes; see CR 7014261).
  //
  // We chose to recreate the expanded list, instead of purging
  // entries from it by iterating over it, to avoid this serial phase
  // at the end of the cleanup pause.
  //
  // The three methods below work as follows:
  // * reset_for_cleanup_tasks() : Nulls the expanded list head at the
  //   start of the cleanup pause.
  // * do_cleanup_work() : Called by the cleanup workers for every
  //   region that is not free / is being freed by the cleanup
  //   pause. It creates a list of expanded tables whose head / tail
  //   are on the thread-local SparsePRTCleanupTask object.
  // * finish_cleanup_task() : Called by the cleanup workers after
  //   they complete their cleanup task. It adds the local list into
  //   the global expanded list. It assumes that the
  //   ParGCRareEvent_lock is being held to ensure MT-safety.
  static void reset_for_cleanup_tasks();
  void do_cleanup_work(SparsePRTCleanupTask* sprt_cleanup_task);
  static void finish_cleanup_task(SparsePRTCleanupTask* sprt_cleanup_task);

  // Returns "true" iff the current ("_next") table has an entry for
  // "region_id" containing "card_index".
  bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const {
    return _next->contains_card(region_id, card_index);
  }
};
 320 
 321 class SparsePRTIter: public RSHashTableIter {
 322 public:
 323   SparsePRTIter(const SparsePRT* sprt) :
 324     RSHashTableIter(sprt->cur()) {}
 325 
 326   bool has_next(size_t& card_index) {
 327     return RSHashTableIter::has_next(card_index);
 328   }
 329 };
 330 
// This allows each worker during a cleanup pause to create a
// thread-local list of sparse tables that have been expanded and need
// to be processed at the beginning of the next GC pause. These lists
// are concatenated into the single expanded list at the end of the
// cleanup pause.
class SparsePRTCleanupTask VALUE_OBJ_CLASS_SPEC {
private:
  // Head and tail of this worker's local list of expanded tables.
  SparsePRT* _head;
  SparsePRT* _tail;

public:
  // Starts with an empty local list.
  SparsePRTCleanupTask() : _head(NULL), _tail(NULL) { }

  // Appends "sprt" to this worker's local list.
  void add(SparsePRT* sprt);
  // Accessors used when splicing the local list into the global
  // expanded list (see SparsePRT::finish_cleanup_task()).
  SparsePRT* head() { return _head; }
  SparsePRT* tail() { return _tail; }
};
 348 
 349 #endif // SHARE_VM_GC_G1_SPARSEPRT_HPP