1 /*
   2  * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_SPARSEPRT_HPP
  26 #define SHARE_VM_GC_IMPLEMENTATION_G1_SPARSEPRT_HPP
  27 
  28 #include "gc_implementation/g1/g1CollectedHeap.hpp"
  29 #include "gc_implementation/g1/heapRegion.hpp"
  30 #include "memory/allocation.hpp"
  31 #include "memory/cardTableModRefBS.hpp"
  32 #include "runtime/mutex.hpp"
  33 #include "utilities/globalDefinitions.hpp"
  34 
  35 // Sparse remembered set for a heap region (the "owning" region).  Maps
  36 // indices of other regions to short sequences of cards in the other region
  37 // that might contain pointers into the owner region.
  38 
  39 // These tables only expand while they are accessed in parallel --
  40 // deletions may be done in single-threaded code.  This allows us to allow
  41 // unsynchronized reads/iterations, as long as expansions caused by
  42 // insertions only enqueue old versions for deletions, but do not delete
  43 // old versions synchronously.
  44 
class SparsePRTEntry: public CHeapObj<mtGC> {
private:
  // Index of the region whose cards this entry records.  A negative value
  // marks the entry as free/invalid (see valid_entry()).
  RegionIdx_t _region_ind;
  // Link to another entry by index.  NOTE(review): presumably used for the
  // hash-bucket chain and/or free list in RSHashTable, terminated by
  // RSHashTable::NullEntry (-1) — confirm in sparsePRT.cpp.
  int         _next_index;
  // Index of the first unused slot in _cards; since cards are packed from
  // the front, this is also the count of valid cards (see num_valid_cards()).
  int         _next_null;
  CardIdx_t   _cards[4];
  // WARNING: Don't put any data members beyond this line. Card array has, in fact, variable length.
  // It should always be the last data member.
  // Note regarding the declared length of _cards:
  //  it is used to make sizeof(SparsePRTEntry) be an even multiple of 4, 
  //  in order to force alignment that could otherwise cause SIGBUS errors when
  //  reading the int variables.
public:
  // Returns the size of the entry, used for entry allocation.
  // The "- 4" cancels the 4 declared _cards elements already counted in
  // sizeof(SparsePRTEntry); the real card array has cards_num() elements.
  static size_t size() { return sizeof(SparsePRTEntry) + sizeof(CardIdx_t) * (cards_num() - 4); }
  // Returns the size of the card array (a command-line tunable).
  static int cards_num() {
      return G1RSetSparseRegionEntries;
  }

  // Set the region_ind to the given value, and delete all cards.
  inline void init(RegionIdx_t region_ind);

  RegionIdx_t r_ind() const { return _region_ind; }
  // An entry is valid iff it has a non-negative region index.
  bool valid_entry() const { return r_ind() >= 0; }
  void set_r_ind(RegionIdx_t rind) { _region_ind = rind; }

  int next_index() const { return _next_index; }
  int* next_index_addr() { return &_next_index; }
  void set_next_index(int ni) { _next_index = ni; }

  // Returns "true" iff the entry contains the given card index.
  inline bool contains_card(CardIdx_t card_index) const;

  // Returns the number of non-NULL card entries.
  inline int num_valid_cards() const { return _next_null; }

  // Requires that the entry not contain the given card index.  If there is
  // space available, add the given card index to the entry and return
  // "true"; otherwise, return "false" to indicate that the entry is full.
  enum AddCardResult {
    overflow,  // The entry is full; caller must move to a denser representation.
    found,     // The card was already present.
    added      // The card was inserted.
  };
  inline AddCardResult add_card(CardIdx_t card_index);

  // Copy the current entry's cards into "cards".
  inline void copy_cards(CardIdx_t* cards) const;
  // Copy the current entry's cards into the "_card" array of "e."
  inline void copy_cards(SparsePRTEntry* e) const;

  // Returns the i-th card in the entry; "i" must be in [0, cards_num()).
  inline CardIdx_t card(int i) const {
      assert(i >= 0, "must be nonnegative");
      assert(i < cards_num(), "range checking");
      return _cards[i]; 
  }
};
 103 
 104 
// Open hash table mapping region indices to SparsePRTEntry records.
// Entries live in a single heap block (_entries) of variable-sized slots
// (see SparsePRTEntry::size()), addressed by slot index rather than pointer.
class RSHashTable : public CHeapObj<mtGC> {

  friend class RSHashTableIter;


  size_t _capacity;        // Number of entry slots in the table.
  size_t _capacity_mask;   // NOTE(review): presumably _capacity - 1 (power-of-two
                           // capacity) used to reduce hash values — confirm in sparsePRT.cpp.
  size_t _occupied_entries; // Number of valid (in-use) entries.
  size_t _occupied_cards;   // Total number of cards stored across all entries.

  SparsePRTEntry* _entries; // Slab of _capacity variable-sized entries; use entry(i) to index.
  int* _buckets;            // Hash buckets holding entry indices (NullEntry if empty).
  int  _free_region;        // NOTE(review): likely the index of the first never-allocated
                            // entry slot (bump allocation) — confirm in sparsePRT.cpp.
  int  _free_list;          // Head of the freed-entry list (NullEntry if empty).

  // Requires that the caller hold a lock preventing parallel modifying
  // operations, and that the table be less than completely full.  If
  // an entry for "region_ind" is already in the table, finds it and
  // returns its address; otherwise allocates, initializes, inserts and
  // returns a new entry for "region_ind".
  SparsePRTEntry* entry_for_region_ind_create(RegionIdx_t region_ind);

  // Returns the index of the next free entry in "_entries".
  int alloc_entry();
  // Declares the entry "fi" to be free.  (It must have already been
  // deleted from any bucket lists.)
  void free_entry(int fi);

public:
  RSHashTable(size_t capacity);
  ~RSHashTable();
  
  enum SomePublicConstants {
    NullEntry = -1  // Sentinel for "no entry" in buckets and next-index links.
  };

  // Attempts to ensure that the given card_index in the given region is in
  // the sparse table.  If successful (because the card was already
  // present, or because it was successfully added) returns "true".
  // Otherwise, returns "false" to indicate that the addition would
  // overflow the entry for the region.  The caller must transfer these
  // entries to a larger-capacity representation.
  bool add_card(RegionIdx_t region_id, CardIdx_t card_index);

  // Copies the cards of the entry for "region_id" into "cards" and returns
  // "true" if such an entry exists; otherwise returns "false".
  bool get_cards(RegionIdx_t region_id, CardIdx_t* cards);

  // Removes the entry for "region_id" if present; returns whether it was found.
  bool delete_entry(RegionIdx_t region_id);

  bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const;

  // Inserts a (copy of a) pre-populated entry, e.g. when migrating entries
  // from a smaller table during expansion.
  void add_entry(SparsePRTEntry* e);

  // Returns the entry for "region_id", or NULL if none exists.
  SparsePRTEntry* get_entry(RegionIdx_t region_id) const;

  void clear();

  size_t capacity() const      { return _capacity; }
  size_t capacity_mask() const { return _capacity_mask;  }
  size_t occupied_entries() const { return _occupied_entries; }
  size_t occupied_cards() const   { return _occupied_cards; }
  size_t mem_size() const;

  // Returns the i-th entry.  Manual address arithmetic is required because
  // entries are variable-sized (SparsePRTEntry::size(), not sizeof).
  SparsePRTEntry* entry(int i) const { return (SparsePRTEntry*)((char*)_entries + SparsePRTEntry::size() * i); }

  void print();
};
 171 
// Iterator over all cards in an RSHashTable.
// ValueObj because will be embedded in HRRS iterator.
class RSHashTableIter VALUE_OBJ_CLASS_SPEC {
  int _tbl_ind;         // [-1, 0.._rsht->_capacity)  -- current bucket index.
  int _bl_ind;          // [-1, 0.._rsht->_capacity)  -- current entry in the bucket list.
  short _card_ind;      // [0..SparsePRTEntry::cards_num())  -- current card within the entry.
  RSHashTable* _rsht;   // The table being iterated (not owned).

  // If the bucket list pointed to by _bl_ind contains a card, sets
  // _bl_ind to the index of that entry, 
  //   updates the card reference to contain the found card and returns true.
  // Otherwise, returns false.
  bool find_first_card_in_list(CardIdx_t & );

  // Computes the proper card index for the card whose offset in the
  // current region (as indicated by _bl_ind) is "ci".
  // This is subject to errors when there is iteration concurrent with
  // modification, but these errors should be benign.
  size_t compute_card_ind(CardIdx_t ci);

public:
  RSHashTableIter(RSHashTable* rsht) :
    _tbl_ind(RSHashTable::NullEntry), // So that first increment gets to 0.
    _bl_ind(RSHashTable::NullEntry),
    _card_ind((SparsePRTEntry::cards_num() - 1)), // So that first increment wraps to card 0.
    _rsht(rsht) {}

  // If another card remains, stores its index in "card_index" and returns
  // true; otherwise returns false (iteration exhausted).
  bool has_next(size_t& card_index);
};
 200 
// Concurrent access to a SparsePRT must be serialized by some external mutex.

class SparsePRTIter;
class SparsePRTCleanupTask;

// Sparse per-region remembered set, holding a pair of hash tables so that
// iteration (over _cur) can proceed against a stable snapshot while
// mutation (of _next) continues; cleanup() re-unifies the two.
class SparsePRT VALUE_OBJ_CLASS_SPEC {
  friend class SparsePRTCleanupTask;

  //  Iterations are done on the _cur hash table, since they only need to
  //  see entries visible at the start of a collection pause.
  //  All other operations are done using the _next hash table.
  RSHashTable* _cur;
  RSHashTable* _next;

  HeapRegion* _hr;  // The owning heap region.

  enum SomeAdditionalPrivateConstants {
    InitialCapacity = 16
  };

  // Replaces _next with a larger table, transferring existing entries.
  void expand();

  bool _expanded;  // True once expand() has diverged _next from _cur.

  bool expanded() { return _expanded; }
  void set_expanded(bool b) { _expanded = b; }

  // Intrusive link for the global list of expanded tables awaiting cleanup.
  SparsePRT* _next_expanded;

  SparsePRT* next_expanded() { return _next_expanded; }
  void set_next_expanded(SparsePRT* nxt) { _next_expanded = nxt; }

  bool should_be_on_expanded_list();

  static SparsePRT* _head_expanded_list;

public:
  SparsePRT(HeapRegion* hr);

  ~SparsePRT();

  size_t occupied() const { return _next->occupied_cards(); }
  size_t mem_size() const;

  // Attempts to ensure that the given card_index in the given region is in
  // the sparse table.  If successful (because the card was already
  // present, or because it was successfully added) returns "true".
  // Otherwise, returns "false" to indicate that the addition would
  // overflow the entry for the region.  The caller must transfer these
  // entries to a larger-capacity representation.
  bool add_card(RegionIdx_t region_id, CardIdx_t card_index);

  // If the table holds an entry for "region_ind",  Copies its
  // cards into "cards", which must be an array of length at least
  // "SparsePRTEntry::cards_num()", and returns "true"; otherwise,
  // returns "false".
  bool get_cards(RegionIdx_t region_ind, CardIdx_t* cards);

  // Return the pointer to the entry associated with the given region.
  SparsePRTEntry* get_entry(RegionIdx_t region_ind);

  // If there is an entry for "region_ind", removes it and return "true";
  // otherwise returns "false."
  bool delete_entry(RegionIdx_t region_ind);

  // Clear the table, and reinitialize to initial capacity.
  void clear();

  // Ensure that "_cur" and "_next" point to the same table.
  void cleanup();

  // Clean up all tables on the expanded list.  Called single threaded.
  static void cleanup_all();
  RSHashTable* cur() const { return _cur; }

  static void add_to_expanded_list(SparsePRT* sprt);
  static SparsePRT* get_from_expanded_list();

  // The purpose of these three methods is to help the GC workers
  // during the cleanup pause to recreate the expanded list, purging
  // any tables from it that belong to regions that are freed during
  // cleanup (if we don't purge those tables, there is a race that
  // causes various crashes; see CR 7014261).
  //
  // We chose to recreate the expanded list, instead of purging
  // entries from it by iterating over it, to avoid this serial phase
  // at the end of the cleanup pause.
  //
  // The three methods below work as follows:
  // * reset_for_cleanup_tasks() : Nulls the expanded list head at the
  //   start of the cleanup pause.
  // * do_cleanup_work() : Called by the cleanup workers for every
  //   region that is not free / is being freed by the cleanup
  //   pause. It creates a list of expanded tables whose head / tail
  //   are on the thread-local SparsePRTCleanupTask object.
  // * finish_cleanup_task() : Called by the cleanup workers after
  //   they complete their cleanup task. It adds the local list into
  //   the global expanded list. It assumes that the
  //   ParGCRareEvent_lock is being held to ensure MT-safety.
  static void reset_for_cleanup_tasks();
  void do_cleanup_work(SparsePRTCleanupTask* sprt_cleanup_task);
  static void finish_cleanup_task(SparsePRTCleanupTask* sprt_cleanup_task);

  // Queries go against _next, which reflects all mutations so far.
  bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const {
    return _next->contains_card(region_id, card_index);
  }
};
 308 
// Thin wrapper binding an RSHashTableIter to a SparsePRT's _cur table --
// the snapshot visible at the start of the collection pause.
class SparsePRTIter: public RSHashTableIter {
public:
  SparsePRTIter(const SparsePRT* sprt) :
    RSHashTableIter(sprt->cur()) {}

  // Forwards to RSHashTableIter::has_next: yields the next card index,
  // or returns false when the table is exhausted.
  bool has_next(size_t& card_index) {
    return RSHashTableIter::has_next(card_index);
  }
};
 318 
// This allows each worker during a cleanup pause to create a
// thread-local list of sparse tables that have been expanded and need
// to be processed at the beginning of the next GC pause. These lists
// are concatenated into the single expanded list at the end of the
// cleanup pause.
class SparsePRTCleanupTask VALUE_OBJ_CLASS_SPEC {
private:
  // Head and tail of the worker-local list; keeping the tail allows
  // O(1) concatenation onto the global expanded list.
  SparsePRT* _head;
  SparsePRT* _tail;

public:
  SparsePRTCleanupTask() : _head(NULL), _tail(NULL) { }

  // Appends "sprt" to the local list.
  void add(SparsePRT* sprt);
  SparsePRT* head() { return _head; }
  SparsePRT* tail() { return _tail; }
};
 336 
 337 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_SPARSEPRT_HPP