1 /*
   2  * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_SPARSEPRT_HPP
  26 #define SHARE_VM_GC_IMPLEMENTATION_G1_SPARSEPRT_HPP
  27 
  28 #include "gc_implementation/g1/g1CollectedHeap.hpp"
  29 #include "gc_implementation/g1/heapRegion.hpp"
  30 #include "memory/allocation.hpp"
  31 #include "memory/cardTableModRefBS.hpp"
  32 #include "runtime/mutex.hpp"
  33 #include "utilities/globalDefinitions.hpp"
  34 
  35 // Sparse remembered set for a heap region (the "owning" region).  Maps
  36 // indices of other regions to short sequences of cards in the other region
  37 // that might contain pointers into the owner region.
  38 
// These tables only expand while they are accessed in parallel --
// deletions may be done in single-threaded code.  This allows
// unsynchronized reads/iterations, as long as expansions caused by
// insertions only enqueue old versions for deletion, but do not delete
// old versions synchronously.
  44 
// One entry of the sparse remembered set: a short, fixed-capacity list of
// card indices within a single "from" region that may contain pointers into
// the owning region.  Allocated from RSHashTable::_entries, chained through
// _next_index into hash-bucket lists and a free list.
class SparsePRTEntry: public CHeapObj<mtGC> {
public:
  enum SomePublicConstants {
    NullEntry     = -1,  // Sentinel: empty card slot / end of a bucket or free list.
    UnrollFactor  =  4   // Loops over the card array are manually unrolled by this factor.
  };
private:
  RegionIdx_t _region_ind;  // Index of the "from" region this entry's cards belong to.
  int         _next_index;  // Index of next entry in bucket/free list; NullEntry terminates.
  CardIdx_t   _cards[1];
  // WARNING: Don't put any data members beyond this line. Card array has, in fact, variable length.
  // It should always be the last data member.
public:
  // Returns the size of the entry, used for entry allocation.
  // (sizeof already accounts for one card; add space for the remaining ones.)
  static size_t size() { return sizeof(SparsePRTEntry) + sizeof(CardIdx_t) * (cards_num() - 1); }
  // Returns the size of the card array.
  static int cards_num() {
    // The number of cards should be a multiple of 4, because that's our current
    // unrolling factor.
    static const int s = MAX2<int>(G1RSetSparseRegionEntries & ~(UnrollFactor - 1), UnrollFactor);
    return s;
  }

  // Set the region_ind to the given value, and delete all cards.
  inline void init(RegionIdx_t region_ind);

  RegionIdx_t r_ind() const { return _region_ind; }
  // Valid entries have a non-negative region index (NullEntry marks invalid).
  bool valid_entry() const { return r_ind() >= 0; }
  void set_r_ind(RegionIdx_t rind) { _region_ind = rind; }

  int next_index() const { return _next_index; }
  int* next_index_addr() { return &_next_index; }
  void set_next_index(int ni) { _next_index = ni; }

  // Returns "true" iff the entry contains the given card index.
  inline bool contains_card(CardIdx_t card_index) const;

  // Returns the number of non-NULL card entries.
  inline int num_valid_cards() const;

  // Requires that the entry not contain the given card index.  If there is
  // space available, add the given card index to the entry and return
  // "true"; otherwise, return "false" to indicate that the entry is full.
  enum AddCardResult {
    overflow,  // The entry was full; the card was not added.
    found,     // The card was already present.
    added      // The card was added successfully.
  };
  inline AddCardResult add_card(CardIdx_t card_index);

  // Copy the current entry's cards into "cards".
  inline void copy_cards(CardIdx_t* cards) const;
  // Copy the current entry's cards into the "_card" array of "e."
  inline void copy_cards(SparsePRTEntry* e) const;

  // Returns the i-th card in this entry (no bounds checking).
  inline CardIdx_t card(int i) const { return _cards[i]; }
};
 102 
 103 
// Open hash table mapping region indices to SparsePRTEntry's.  Entries are
// allocated from a single contiguous array (_entries) and chained via entry
// indices (not pointers) into per-bucket lists and a free list.
class RSHashTable : public CHeapObj<mtGC> {

  friend class RSHashTableIter;

  enum SomePrivateConstants {
    NullEntry = -1  // Terminates bucket lists and the free list.
  };

  size_t _capacity;          // Number of buckets / max entries; a power of two.
  size_t _capacity_mask;     // _capacity - 1, used to reduce hashes to bucket indices.
  size_t _occupied_entries;  // Number of entries currently in use.
  size_t _occupied_cards;    // Total number of cards across all entries.

  SparsePRTEntry* _entries;  // Backing store; entries are referenced by index.
  int* _buckets;             // Bucket heads: index into _entries, or NullEntry.
  int  _free_region;         // Index of the first never-yet-allocated entry.
  int  _free_list;           // Head of the list of freed (reusable) entries.

  // Requires that the caller hold a lock preventing parallel modifying
  // operations, and that the table be less than completely full.  If
  // an entry for "region_ind" is already in the table, finds it and
  // returns its address; otherwise returns "NULL."
  SparsePRTEntry* entry_for_region_ind(RegionIdx_t region_ind) const;

  // Requires that the caller hold a lock preventing parallel modifying
  // operations, and that the table be less than completely full.  If
  // an entry for "region_ind" is already in the table, finds it and
  // returns its address; otherwise allocates, initializes, inserts and
  // returns a new entry for "region_ind".
  SparsePRTEntry* entry_for_region_ind_create(RegionIdx_t region_ind);

  // Returns the index of the next free entry in "_entries".
  int alloc_entry();
  // Declares the entry "fi" to be free.  (It must have already been
  // deleted from any bucket lists.)
  void free_entry(int fi);

public:
  RSHashTable(size_t capacity);
  ~RSHashTable();

  // Attempts to ensure that the given card_index in the given region is in
  // the sparse table.  If successful (because the card was already
  // present, or because it was successfully added) returns "true".
  // Otherwise, returns "false" to indicate that the addition would
  // overflow the entry for the region.  The caller must transfer these
  // entries to a larger-capacity representation.
  bool add_card(RegionIdx_t region_id, CardIdx_t card_index);

  // Copies the cards of the entry for "region_id" (if any) into "cards";
  // returns "true" iff such an entry was found.
  bool get_cards(RegionIdx_t region_id, CardIdx_t* cards);

  // Removes the entry for "region_id" (if any); returns "true" iff one existed.
  bool delete_entry(RegionIdx_t region_id);

  bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const;

  // Inserts entry "e" (already populated) into this table.
  void add_entry(SparsePRTEntry* e);

  SparsePRTEntry* get_entry(RegionIdx_t region_id);

  // Resets the table to the empty state (buckets cleared, all entries free).
  void clear();

  size_t capacity() const      { return _capacity;       }
  size_t capacity_mask() const { return _capacity_mask;  }
  size_t occupied_entries() const { return _occupied_entries; }
  size_t occupied_cards() const   { return _occupied_cards;   }
  size_t mem_size() const;

  // Returns the i-th entry by byte offset; SparsePRTEntry has a variable
  // length, so plain array indexing on _entries would be wrong.
  SparsePRTEntry* entry(int i) const { return (SparsePRTEntry*)((char*)_entries + SparsePRTEntry::size() * i); }

  void print();
};
 175 
 176 // ValueObj because will be embedded in HRRS iterator.
// ValueObj because will be embedded in HRRS iterator.
// Iterates over all cards in an RSHashTable by walking the bucket array and,
// within each bucket, the entry list and each entry's card array.
class RSHashTableIter VALUE_OBJ_CLASS_SPEC {
  int _tbl_ind;         // [-1, 0.._rsht->_capacity)    current bucket
  int _bl_ind;          // [-1, 0.._rsht->_capacity)    current entry within bucket list
  short _card_ind;      // [0..SparsePRTEntry::cards_num())  current card within entry
  RSHashTable* _rsht;   // Table being iterated; not owned.

  // If the bucket list pointed to by _bl_ind contains a card, sets
  // _bl_ind to the index of that entry, and returns the card.
  // Otherwise, returns SparsePRTEntry::NullEntry.
  CardIdx_t find_first_card_in_list();

  // Computes the proper card index for the card whose offset in the
  // current region (as indicated by _bl_ind) is "ci".
  // This is subject to errors when there is iteration concurrent with
  // modification, but these errors should be benign.
  size_t compute_card_ind(CardIdx_t ci);

public:
  RSHashTableIter(RSHashTable* rsht) :
    _tbl_ind(RSHashTable::NullEntry), // So that first increment gets to 0.
    _bl_ind(RSHashTable::NullEntry),
    _card_ind((SparsePRTEntry::cards_num() - 1)), // Last slot, so first advance moves to a new entry.
    _rsht(rsht) {}

  // If there is a next card, sets "card_index" to it and returns "true";
  // otherwise returns "false" (iteration is exhausted).
  bool has_next(size_t& card_index);
};
 203 
 204 // Concurrent access to a SparsePRT must be serialized by some external mutex.
 205 
 206 class SparsePRTIter;
 207 class SparsePRTCleanupTask;
 208 
// Sparse remembered set for one heap region, double-buffered over two
// RSHashTables so that iteration (over _cur) can proceed while mutators add
// to _next; cleanup() re-unifies the two.
class SparsePRT VALUE_OBJ_CLASS_SPEC {
  friend class SparsePRTCleanupTask;

  //  Iterations are done on the _cur hash table, since they only need to
  //  see entries visible at the start of a collection pause.
  //  All other operations are done using the _next hash table.
  RSHashTable* _cur;
  RSHashTable* _next;

  HeapRegion* _hr;  // The owning region this sparse RS belongs to.

  enum SomeAdditionalPrivateConstants {
    InitialCapacity = 16  // Capacity of a freshly-created / cleared table.
  };

  // Replaces _next with a larger table, copying surviving entries.  The old
  // table is kept alive (as _cur) rather than deleted, so concurrent readers
  // remain safe; see the file-header comment.
  void expand();

  bool _expanded;  // True iff this PRT has expanded since the last cleanup.

  bool expanded() { return _expanded; }
  void set_expanded(bool b) { _expanded = b; }

  // Link for the global singly-linked list of expanded tables.
  SparsePRT* _next_expanded;

  SparsePRT* next_expanded() { return _next_expanded; }
  void set_next_expanded(SparsePRT* nxt) { _next_expanded = nxt; }

  bool should_be_on_expanded_list();

  // Head of the global list of tables that have expanded and need cleanup.
  static SparsePRT* _head_expanded_list;

public:
  SparsePRT(HeapRegion* hr);

  ~SparsePRT();

  // Number of cards currently recorded (in the up-to-date _next table).
  size_t occupied() const { return _next->occupied_cards(); }
  size_t mem_size() const;

  // Attempts to ensure that the given card_index in the given region is in
  // the sparse table.  If successful (because the card was already
  // present, or because it was successfully added) returns "true".
  // Otherwise, returns "false" to indicate that the addition would
  // overflow the entry for the region.  The caller must transfer these
  // entries to a larger-capacity representation.
  bool add_card(RegionIdx_t region_id, CardIdx_t card_index);

  // If the table holds an entry for "region_ind", copies its
  // cards into "cards", which must be an array of length at least
  // "SparsePRTEntry::cards_num()", and returns "true"; otherwise,
  // returns "false".
  bool get_cards(RegionIdx_t region_ind, CardIdx_t* cards);

  // Return the pointer to the entry associated with the given region.
  SparsePRTEntry* get_entry(RegionIdx_t region_ind);

  // If there is an entry for "region_ind", removes it and return "true";
  // otherwise returns "false."
  bool delete_entry(RegionIdx_t region_ind);

  // Clear the table, and reinitialize to initial capacity.
  void clear();

  // Ensure that "_cur" and "_next" point to the same table.
  void cleanup();

  // Clean up all tables on the expanded list.  Called single threaded.
  static void cleanup_all();
  RSHashTable* cur() const { return _cur; }

  static void add_to_expanded_list(SparsePRT* sprt);
  static SparsePRT* get_from_expanded_list();

  // The purpose of these three methods is to help the GC workers
  // during the cleanup pause to recreate the expanded list, purging
  // any tables from it that belong to regions that are freed during
  // cleanup (if we don't purge those tables, there is a race that
  // causes various crashes; see CR 7014261).
  //
  // We chose to recreate the expanded list, instead of purging
  // entries from it by iterating over it, to avoid this serial phase
  // at the end of the cleanup pause.
  //
  // The three methods below work as follows:
  // * reset_for_cleanup_tasks() : Nulls the expanded list head at the
  //   start of the cleanup pause.
  // * do_cleanup_work() : Called by the cleanup workers for every
  //   region that is not free / is being freed by the cleanup
  //   pause. It creates a list of expanded tables whose head / tail
  //   are on the thread-local SparsePRTCleanupTask object.
  // * finish_cleanup_task() : Called by the cleanup workers after
  //   they complete their cleanup task. It adds the local list into
  //   the global expanded list. It assumes that the
  //   ParGCRareEvent_lock is being held to ensure MT-safety.
  static void reset_for_cleanup_tasks();
  void do_cleanup_work(SparsePRTCleanupTask* sprt_cleanup_task);
  static void finish_cleanup_task(SparsePRTCleanupTask* sprt_cleanup_task);

  // Queries the up-to-date (_next) table.
  bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const {
    return _next->contains_card(region_id, card_index);
  }
};
 311 
// Iterator over a SparsePRT's cards; iterates the _cur table (the snapshot
// visible at the start of the pause), per the comment on SparsePRT.
class SparsePRTIter: public RSHashTableIter {
public:
  SparsePRTIter(const SparsePRT* sprt) :
    RSHashTableIter(sprt->cur()) {}

  // Forwards to RSHashTableIter::has_next; see its contract there.
  bool has_next(size_t& card_index) {
    return RSHashTableIter::has_next(card_index);
  }
};
 321 
// This allows each worker during a cleanup pause to create a
// thread-local list of sparse tables that have been expanded and need
// to be processed at the beginning of the next GC pause. These lists
// are concatenated into the single expanded list at the end of the
// cleanup pause.
// Thread-local head/tail pair for a worker's list of expanded SparsePRTs,
// later spliced into the global expanded list (see SparsePRT cleanup methods).
class SparsePRTCleanupTask VALUE_OBJ_CLASS_SPEC {
private:
  SparsePRT* _head;  // First table in this worker's local list (NULL if empty).
  SparsePRT* _tail;  // Last table; kept so splicing into the global list is O(1).

public:
  SparsePRTCleanupTask() : _head(NULL), _tail(NULL) { }

  // Appends "sprt" to this worker's local list.
  void add(SparsePRT* sprt);
  SparsePRT* head() { return _head; }
  SparsePRT* tail() { return _tail; }
};
 339 
 340 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_SPARSEPRT_HPP