src/hotspot/share/gc/g1/heapRegionRemSet.hpp

// The "_fine_grain_regions" array is an open hash table of PerRegionTables
// (PRTs), indicating regions for which we're keeping the RS as a set of
// cards.  The strategy is to cap the size of the fine-grain table,
// deleting an entry and setting the corresponding coarse-grained bit when
// we would overflow this cap.

// We use a mixture of locking and lock-free techniques here.  We allow
// threads to locate PRTs without locking, but threads attempting to alter
// a bucket list obtain a lock.  This means that any failing attempt to
// find a PRT must be retried with the lock.  It might seem dangerous that
// a read can find a PRT that is concurrently deleted.  This is all right,
// because:
//
//   1) We only actually free PRTs at safepoints (though we reuse them at
//      other times).
//   2) We find PRTs in an attempt to add entries.  If a PRT is deleted,
//      its _coarse_map bit is set, so the card that we were attempting to
//      add is represented.  If a deleted PRT is re-used, a thread adding a
//      bit, thinking the PRT is for a different region, does no harm.

class OtherRegionsTable VALUE_OBJ_CLASS_SPEC {
  friend class HeapRegionRemSetIterator;

  G1CollectedHeap* _g1h;
  Mutex*           _m;
  HeapRegion*      _hr;

  // These are protected by "_m".
  CHeapBitMap _coarse_map;
  size_t      _n_coarse_entries;
  static jint _n_coarsenings;

  PerRegionTable** _fine_grain_regions;
  size_t           _n_fine_entries;

  // The fine grain remembered sets are doubly linked together using
  // their 'next' and 'prev' fields.
  // This allows fast bulk freeing of all the fine grain remembered
  // set entries, and fast finding of all of them without iterating
  // over the _fine_grain_regions table.
  PerRegionTable* _first_all_fine_prts;




// The "_fine_grain_regions" array is an open hash table of PerRegionTables
// (PRTs), indicating regions for which we're keeping the RS as a set of
// cards.  The strategy is to cap the size of the fine-grain table,
// deleting an entry and setting the corresponding coarse-grained bit when
// we would overflow this cap.

// We use a mixture of locking and lock-free techniques here.  We allow
// threads to locate PRTs without locking, but threads attempting to alter
// a bucket list obtain a lock.  This means that any failing attempt to
// find a PRT must be retried with the lock.  It might seem dangerous that
// a read can find a PRT that is concurrently deleted.  This is all right,
// because:
//
//   1) We only actually free PRTs at safepoints (though we reuse them at
//      other times).
//   2) We find PRTs in an attempt to add entries.  If a PRT is deleted,
//      its _coarse_map bit is set, so the card that we were attempting to
//      add is represented.  If a deleted PRT is re-used, a thread adding a
//      bit, thinking the PRT is for a different region, does no harm.

class OtherRegionsTable {
  friend class HeapRegionRemSetIterator;

  G1CollectedHeap* _g1h;
  Mutex*           _m;
  HeapRegion*      _hr;

  // These are protected by "_m".
  CHeapBitMap _coarse_map;
  size_t      _n_coarse_entries;
  static jint _n_coarsenings;

  PerRegionTable** _fine_grain_regions;
  size_t           _n_fine_entries;

  // The fine grain remembered sets are doubly linked together using
  // their 'next' and 'prev' fields.
  // This allows fast bulk freeing of all the fine grain remembered
  // set entries, and fast finding of all of them without iterating
  // over the _fine_grain_regions table.
  PerRegionTable* _first_all_fine_prts;
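For illustration, here is a minimal, self-contained sketch of the coarsening strategy described in the header comment: card additions go into a per-region fine-grain table until a cap would be exceeded, after which an entry is evicted and its whole source region is remembered with a single coarse bit. The names (SimplePRT, SimpleOtherRegionsTable, add_card), the sizes, and the eviction policy are hypothetical stand-ins, not the real HotSpot data structures.

#include <bitset>
#include <cstddef>
#include <unordered_map>

static const size_t kMaxRegions     = 1024;  // assumed heap region count
static const size_t kCardsPerRegion = 512;   // assumed cards per region
static const size_t kMaxFineEntries = 128;   // assumed cap on fine-grain PRTs

struct SimplePRT {
  size_t                       from_region;  // region this PRT tracks cards for
  std::bitset<kCardsPerRegion> cards;        // one bit per card in that region
};

class SimpleOtherRegionsTable {
  std::unordered_map<size_t, SimplePRT> _fine;    // stands in for _fine_grain_regions
  std::bitset<kMaxRegions>              _coarse;  // stands in for _coarse_map

public:
  // Record that 'card_index' in 'from_region' may hold a reference into the
  // owning region.
  void add_card(size_t from_region, size_t card_index) {
    if (_coarse.test(from_region)) {
      return;  // already coarsened: the whole region is remembered
    }
    std::unordered_map<size_t, SimplePRT>::iterator it = _fine.find(from_region);
    if (it != _fine.end()) {
      it->second.cards.set(card_index);  // fine-grain hit: just set the card bit
      return;
    }
    if (_fine.size() >= kMaxFineEntries) {
      // Table would overflow: evict one PRT and remember its whole region
      // coarsely instead (the "coarsening" the comment describes).
      std::unordered_map<size_t, SimplePRT>::iterator victim = _fine.begin();
      _coarse.set(victim->second.from_region);  // a real policy picks the densest PRT
      _fine.erase(victim);
    }
    SimplePRT prt;
    prt.from_region = from_region;
    prt.cards.set(card_index);
    _fine.emplace(from_region, prt);
  }

  bool contains(size_t from_region, size_t card_index) const {
    if (_coarse.test(from_region)) {
      return true;  // coarse entries answer "maybe" for every card in the region
    }
    std::unordered_map<size_t, SimplePRT>::const_iterator it = _fine.find(from_region);
    return it != _fine.end() && it->second.cards.test(card_index);
  }
};

Note the asymmetry this buys: coarsening trades precision (every card of the evicted region must now be scanned) for a bounded memory footprint of the fine-grain table.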

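The locking comment can also be made concrete with a small sketch of the "lock-free find, locked retry" pattern. Everything below (Node, find_lockfree, find_or_insert, the bucket array) is an assumed simplification using std::atomic and std::mutex rather than HotSpot's own primitives; the safety of racing readers relies on the same rule the comment gives, namely that nodes are never actually freed while a reader might still hold one.

#include <atomic>
#include <cstddef>
#include <mutex>

// Hypothetical node standing in for PerRegionTable; only the fields the
// lookup pattern needs are modelled.
struct Node {
  size_t             region;     // "from" region index this node tracks
  std::atomic<Node*> collision;  // next node in the same hash bucket
};

static const size_t       kNumBuckets = 64;       // assumed bucket count
static std::atomic<Node*> g_buckets[kNumBuckets]; // stands in for _fine_grain_regions
static std::mutex         g_mutex;                // stands in for _m

// Lock-free lookup.  Racing with a concurrent writer is tolerable because
// nodes are never freed while a reader may still hold a pointer to one
// (HotSpot defers the actual free to a safepoint).
static Node* find_lockfree(size_t region) {
  Node* n = g_buckets[region % kNumBuckets].load(std::memory_order_acquire);
  while (n != nullptr) {
    if (n->region == region) {
      return n;
    }
    n = n->collision.load(std::memory_order_acquire);
  }
  return nullptr;
}

// A failed lock-free lookup is retried with the lock held, because a writer
// may have been rearranging the bucket list while we scanned it.
static Node* find_or_insert(size_t region) {
  if (Node* n = find_lockfree(region)) {
    return n;
  }
  std::lock_guard<std::mutex> guard(g_mutex);
  if (Node* n = find_lockfree(region)) {
    return n;  // another thread inserted it while we waited for the lock
  }
  std::atomic<Node*>& head = g_buckets[region % kNumBuckets];
  Node* fresh = new Node();
  fresh->region = region;
  fresh->collision.store(head.load(std::memory_order_relaxed),
                         std::memory_order_relaxed);
  head.store(fresh, std::memory_order_release);  // publish to lock-free readers
  return fresh;
}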

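Finally, a sketch of why the fine-grain PRTs are also threaded onto a doubly linked list headed by _first_all_fine_prts: the 'prev'/'next' links give O(1) unlinking of a single PRT and one-pass bulk freeing without scanning the (mostly empty) hash table. FinePRT and FinePRTList are illustrative names only; the real PerRegionTable keeps its card bitmap and links together.

#include <cstddef>

// Hypothetical, pared-down PRT showing only the intrusive links.
struct FinePRT {
  size_t   region;
  FinePRT* prev;   // previous PRT in the "all fine PRTs" list
  FinePRT* next;   // next PRT in the "all fine PRTs" list
};

class FinePRTList {
  FinePRT* _first;   // stands in for _first_all_fine_prts

public:
  FinePRTList() : _first(nullptr) {}

  // O(1) insertion at the head when a PRT is created.
  void link(FinePRT* prt) {
    prt->prev = nullptr;
    prt->next = _first;
    if (_first != nullptr) {
      _first->prev = prt;
    }
    _first = prt;
  }

  // O(1) removal when a single PRT is coarsened away; the 'prev' link is
  // what makes this possible without walking the hash table buckets.
  void unlink(FinePRT* prt) {
    if (prt->prev != nullptr) {
      prt->prev->next = prt->next;
    } else {
      _first = prt->next;
    }
    if (prt->next != nullptr) {
      prt->next->prev = prt->prev;
    }
    prt->prev = prt->next = nullptr;
  }

  // Bulk freeing: walk the list once instead of scanning every bucket of
  // the fine-grain hash table.
  void free_all() {
    FinePRT* cur = _first;
    while (cur != nullptr) {
      FinePRT* next = cur->next;
      delete cur;          // the real code recycles PRTs rather than deleting
      cur = next;
    }
    _first = nullptr;
  }
};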