< prev index next >

src/hotspot/share/gc/g1/sparsePRT.hpp

Print this page
rev 52572 : [mq]: 8213996-remove-sparseprt-table


  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_G1_SPARSEPRT_HPP
  26 #define SHARE_VM_GC_G1_SPARSEPRT_HPP
  27 
  28 #include "gc/g1/g1CollectedHeap.hpp"
  29 #include "gc/g1/heapRegion.hpp"
  30 #include "gc/shared/cardTableBarrierSet.hpp"
  31 #include "memory/allocation.hpp"
  32 #include "runtime/mutex.hpp"
  33 #include "utilities/align.hpp"
  34 #include "utilities/globalDefinitions.hpp"
  35 
  36 // Sparse remembered set for a heap region (the "owning" region).  Maps
  37 // indices of other regions to short sequences of cards in the other region
  38 // that might contain pointers into the owner region.
  39 
  40 // These tables only expand while they are accessed in parallel --
  41 // deletions may be done in single-threaded code.  This allows us to permit
  42 // unsynchronized reads/iterations, as long as expansions caused by
  43 // insertions only enqueue old versions for deletions, but do not delete
  44 // old versions synchronously.
  45 
  46 class SparsePRTEntry: public CHeapObj<mtGC> {
  47 private:
  48   // The type of a card entry.
  49   typedef uint16_t card_elem_t;
  50 
  51   // We need to make sizeof(SparsePRTEntry) an even multiple of maximum member size,
  52   // in order to force correct alignment that could otherwise cause SIGBUS errors
  53   // when reading the member variables. This calculates the minimum number of card
  54   // array elements required to get that alignment.
  55   static const size_t card_array_alignment = sizeof(int) / sizeof(card_elem_t);
  56 
  57   RegionIdx_t _region_ind;
  58   int         _next_index;
  59   int         _next_null;
  60   // The actual cards stored in this array.
  61   // WARNING: Don't put any data members beyond this line. Card array has, in fact, variable length.
  62   // It should always be the last data member.
  63   card_elem_t _cards[card_array_alignment];
  64 
  65   // Copy the current entry's cards into "cards".


 203 
 204   // Computes the proper card index for the card whose offset in the
 205   // current region (as indicated by _bl_ind) is "ci".
 206   // This is subject to errors when there is iteration concurrent with
 207   // modification, but these errors should be benign.
 208   size_t compute_card_ind(CardIdx_t ci);
 209 
 210 public:
 211   RSHashTableIter(RSHashTable* rsht) :
 212     _tbl_ind(RSHashTable::NullEntry), // So that first increment gets to 0.
 213     _bl_ind(RSHashTable::NullEntry),
 214     _card_ind((SparsePRTEntry::cards_num() - 1)),
 215     _rsht(rsht) {}
 216 
 217   bool has_next(size_t& card_index);
 218 };
 219 
 220 // Concurrent access to a SparsePRT must be serialized by some external mutex.
 221 
 222 class SparsePRTIter;
 223 class SparsePRTCleanupTask;
 224 
 225 class SparsePRT {
 226   friend class SparsePRTCleanupTask;
 227 
 228   //  Iterations are done on the _cur hash table, since they only need to
 229   //  see entries visible at the start of a collection pause.
 230   //  All other operations are done using the _next hash table.
 231   RSHashTable* _cur;
 232   RSHashTable* _next;
 233 
 234   enum SomeAdditionalPrivateConstants {
 235     InitialCapacity = 16
 236   };
 237 
      // Grows the hash table; per the file-level comment, expansion enqueues the
      // old table version for later deletion rather than deleting it synchronously.
 238   void expand();
 239 
      // Set once this table has been expanded; expanded tables are queued on the
      // global expanded list (via _next_expanded) so cleanup can visit them later.
 240   bool _expanded;
 241 
 242   bool expanded() { return _expanded; }
 243   void set_expanded(bool b) { _expanded = b; }
 244 
      // Link field for the singly-linked list of expanded tables headed by
      // _head_expanded_list.
 245   SparsePRT* _next_expanded;
 246 
 247   SparsePRT* next_expanded() { return _next_expanded; }
 248   void set_next_expanded(SparsePRT* nxt) { _next_expanded = nxt; }
 249 
 250   bool should_be_on_expanded_list();
 251 
      // Head of the global list of expanded tables awaiting cleanup.
 252   static SparsePRT* volatile _head_expanded_list;
 253 
 254 public:
 255   SparsePRT();
 256 
 257   ~SparsePRT();
 258 
      // Card count is taken from _next, the table used for modification.
 259   size_t occupied() const { return _next->occupied_cards(); }
 260   size_t mem_size() const;
 261 
 262   // Attempts to ensure that the given card_index in the given region is in
 263   // the sparse table.  If successful (because the card was already
 264   // present, or because it was successfully added) returns "true".
 265   // Otherwise, returns "false" to indicate that the addition would
 266   // overflow the entry for the region.  The caller must transfer these
 267   // entries to a larger-capacity representation.
 268   bool add_card(RegionIdx_t region_id, CardIdx_t card_index);
 269 
 270   // Return the pointer to the entry associated with the given region.
 271   SparsePRTEntry* get_entry(RegionIdx_t region_ind);
 272 
 273   // If there is an entry for "region_ind", removes it and return "true";
 274   // otherwise returns "false."
 275   bool delete_entry(RegionIdx_t region_ind);
 276 
 277   // Clear the table, and reinitialize to initial capacity.
 278   void clear();
 279 
 280   // Ensure that "_cur" and "_next" point to the same table.
 281   void cleanup();
 282 
 283   // Clean up all tables on the expanded list.  Called single threaded.
 284   static void cleanup_all();
      // The table snapshot that iterations read (see the comment on _cur above);
      // used by SparsePRTIter.
 285   RSHashTable* cur() const { return _cur; }
 286 
 287   static void add_to_expanded_list(SparsePRT* sprt);
 288   static SparsePRT* get_from_expanded_list();
 289 
 290   // The purpose of these three methods is to help the GC workers
 291   // during the cleanup pause to recreate the expanded list, purging
 292   // any tables from it that belong to regions that are freed during
 293   // cleanup (if we don't purge those tables, there is a race that
 294   // causes various crashes; see CR 7014261).
 295   //
 296   // We chose to recreate the expanded list, instead of purging
 297   // entries from it by iterating over it, to avoid this serial phase
 298   // at the end of the cleanup pause.
 299   //
 300   // The three methods below work as follows:
 301   // * reset_for_cleanup_tasks() : Nulls the expanded list head at the
 302   //   start of the cleanup pause.
 303   // * do_cleanup_work() : Called by the cleanup workers for every
 304   //   region that is not free / is being freed by the cleanup
 305   //   pause. It creates a list of expanded tables whose head / tail
 306   //   are on the thread-local SparsePRTCleanupTask object.
 307   // * finish_cleanup_task() : Called by the cleanup workers after
 308   //   they complete their cleanup task. It adds the local list into
 309   //   the global expanded list. It assumes that the
 310   //   ParGCRareEvent_lock is being held to ensure MT-safety.
 311   static void reset_for_cleanup_tasks();
 312   void do_cleanup_work(SparsePRTCleanupTask* sprt_cleanup_task);
 313   static void finish_cleanup_task(SparsePRTCleanupTask* sprt_cleanup_task);
 314 
      // Queries, like other non-iteration operations, go through _next.
 315   bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const {
 316     return _next->contains_card(region_id, card_index);
 317   }
 318 };
 319 
 320 class SparsePRTIter: public RSHashTableIter {
 321 public:
      // Iterate over the _cur table snapshot (SparsePRT::cur()), i.e. the
      // entries visible at the start of the collection pause.
 322   SparsePRTIter(const SparsePRT* sprt) :
 323     RSHashTableIter(sprt->cur()) {}
 324 
      // Sets "card_index" to the next card in the table; returns false when
      // the iteration is exhausted.
 325   bool has_next(size_t& card_index) {
 326     return RSHashTableIter::has_next(card_index);
 327   }
 328 };
 329 
 330 // This allows each worker during a cleanup pause to create a
 331 // thread-local list of sparse tables that have been expanded and need
 332 // to be processed at the beginning of the next GC pause. These lists
 333 // are concatenated into the single expanded list at the end of the
 334 // cleanup pause.
 335 class SparsePRTCleanupTask {
 336 private:
      // Head and tail of this worker's thread-local list of expanded sparse
      // tables; the list is concatenated into the global expanded list at the
      // end of the cleanup pause.
 337   SparsePRT* _head;
 338   SparsePRT* _tail;
 339 
 340 public:
 341   SparsePRTCleanupTask() : _head(NULL), _tail(NULL) { }
 342 
      // Append "sprt" to this worker's local list.
 343   void add(SparsePRT* sprt);
 344   SparsePRT* head() { return _head; }
 345   SparsePRT* tail() { return _tail; }
 346 };
 347 
 348 #endif // SHARE_VM_GC_G1_SPARSEPRT_HPP


  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_G1_SPARSEPRT_HPP
  26 #define SHARE_VM_GC_G1_SPARSEPRT_HPP
  27 
  28 #include "gc/g1/g1CollectedHeap.hpp"
  29 #include "gc/g1/heapRegion.hpp"
  30 #include "gc/shared/cardTableBarrierSet.hpp"
  31 #include "memory/allocation.hpp"
  32 #include "runtime/mutex.hpp"
  33 #include "utilities/align.hpp"
  34 #include "utilities/globalDefinitions.hpp"
  35 
  36 // Sparse remembered set for a heap region (the "owning" region).  Maps
  37 // indices of other regions to short sequences of cards in the other region
  38 // that might contain pointers into the owner region.
  39 






  40 class SparsePRTEntry: public CHeapObj<mtGC> {
  41 private:
  42   // The type of a card entry.
  43   typedef uint16_t card_elem_t;
  44 
  45   // We need to make sizeof(SparsePRTEntry) an even multiple of maximum member size,
  46   // in order to force correct alignment that could otherwise cause SIGBUS errors
  47   // when reading the member variables. This calculates the minimum number of card
  48   // array elements required to get that alignment.
  49   static const size_t card_array_alignment = sizeof(int) / sizeof(card_elem_t);
  50 
  51   RegionIdx_t _region_ind;
  52   int         _next_index;
  53   int         _next_null;
  54   // The actual cards stored in this array.
  55   // WARNING: Don't put any data members beyond this line. Card array has, in fact, variable length.
  56   // It should always be the last data member.
  57   card_elem_t _cards[card_array_alignment];
  58 
  59   // Copy the current entry's cards into "cards".


 197 
 198   // Computes the proper card index for the card whose offset in the
 199   // current region (as indicated by _bl_ind) is "ci".
 200   // This is subject to errors when there is iteration concurrent with
 201   // modification, but these errors should be benign.
 202   size_t compute_card_ind(CardIdx_t ci);
 203 
 204 public:
 205   RSHashTableIter(RSHashTable* rsht) :
 206     _tbl_ind(RSHashTable::NullEntry), // So that first increment gets to 0.
 207     _bl_ind(RSHashTable::NullEntry),
 208     _card_ind((SparsePRTEntry::cards_num() - 1)),
 209     _rsht(rsht) {}
 210 
 211   bool has_next(size_t& card_index);
 212 };
 213 
 214 // Concurrent access to a SparsePRT must be serialized by some external mutex.
 215 
 216 class SparsePRTIter;

 217 
 218 class SparsePRT {
 219   friend class SparsePRTIter;
 220 
      // The single hash table holding all entries; queries (occupied(),
      // contains_card()) and the iterator (SparsePRTIter) all use this table
      // directly.
 221   RSHashTable* _table;




 222 
 223   enum SomeAdditionalPrivateConstants {
 224     InitialCapacity = 16
 225   };
 226 
      // Grows the hash table when an add would overflow its capacity.
      // NOTE(review): implementation not visible here — presumably replaces
      // _table with a larger RSHashTable; confirm in sparsePRT.cpp.
 227   void expand();
 228 














 229 public:
 230   SparsePRT();

 231   ~SparsePRT();
 232 
 233   size_t occupied() const { return _table->occupied_cards(); }
 234   size_t mem_size() const;
 235 
 236   // Attempts to ensure that the given card_index in the given region is in
 237   // the sparse table.  If successful (because the card was already
 238   // present, or because it was successfully added) returns "true".
 239   // Otherwise, returns "false" to indicate that the addition would
 240   // overflow the entry for the region.  The caller must transfer these
 241   // entries to a larger-capacity representation.
 242   bool add_card(RegionIdx_t region_id, CardIdx_t card_index);
 243 
 244   // Return the pointer to the entry associated with the given region.
 245   SparsePRTEntry* get_entry(RegionIdx_t region_ind);
 246 
 247   // If there is an entry for "region_ind", removes it and return "true";
 248   // otherwise returns "false."
 249   bool delete_entry(RegionIdx_t region_ind);
 250 
 251   // Clear the table, and reinitialize to initial capacity.
 252   void clear();
 253 



































 254   bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const {
 255     return _table->contains_card(region_id, card_index);
 256   }
 257 };
 258 
 259 class SparsePRTIter: public RSHashTableIter {
 260 public:
      // Iterate directly over the SparsePRT's single hash table (_table is
      // accessible here because SparsePRT declares this class a friend).
 261   SparsePRTIter(const SparsePRT* sprt) :
 262     RSHashTableIter(sprt->_table) {}
 263 
      // Sets "card_index" to the next card in the table; returns false when
      // the iteration is exhausted.
 264   bool has_next(size_t& card_index) {
 265     return RSHashTableIter::has_next(card_index);
 266   }


















 267 };
 268 
 269 #endif // SHARE_VM_GC_G1_SPARSEPRT_HPP
< prev index next >