
src/share/vm/gc/g1/heapRegionRemSet.hpp

rev 11045 : 8155233: Lazy coarse map clear
Summary: Only clear the coarse bitmaps of the remembered sets if they were dirtied.
Reviewed-by:
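The patch itself is small: a single _coarse_dirty flag is added to
OtherRegionsTable (visible in the class body below). The flag is raised
whenever a coarse bit is set, and the bitmap clear is skipped entirely
while the flag is down, so remembered sets that never coarsened anything
pay only a flag test. What follows is a minimal standalone sketch of
that pattern, not the actual patch: std::bitset stands in for HotSpot's
BitMap, and the method names (add_coarse_entry, clear, contains) are
hypothetical.

  #include <bitset>
  #include <cstddef>
  #include <cstdio>

  // Sketch only: CoarseSet models the coarse-map part of
  // OtherRegionsTable; names are illustrative, not HotSpot's.
  class CoarseSet {
    static const size_t MaxRegions = 1024;
    std::bitset<MaxRegions> _coarse_map;
    bool _coarse_dirty;              // true once any coarse bit was set

  public:
    CoarseSet() : _coarse_dirty(false) {}

    void add_coarse_entry(size_t region_idx) {
      _coarse_map.set(region_idx);
      _coarse_dirty = true;          // a future clear now has work to do
    }

    void clear() {
      // Lazy clear: resetting a large bitmap is expensive, so only
      // do it if some bit was actually dirtied since the last clear.
      if (_coarse_dirty) {
        _coarse_map.reset();
        _coarse_dirty = false;
      }
    }

    bool contains(size_t region_idx) const {
      return _coarse_map.test(region_idx);
    }
  };

  int main() {
    CoarseSet s;
    s.clear();                       // cheap: never dirtied, reset skipped
    s.add_coarse_entry(42);
    s.clear();                       // dirty, so a full reset happens
    printf("contains(42): %d\n", (int)s.contains(42));
    return 0;
  }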


  62 // a bucket list obtain a lock.  This means that any failing attempt to
  63 // find a PRT must be retried with the lock.  It might seem dangerous that
  64 // a read can find a PRT that is concurrently deleted.  This is all right,
  65 // because:
  66 //
  67 //   1) We only actually free PRT's at safe points (though we reuse them at
  68 //      other times).
  69 //   2) We find PRT's in an attempt to add entries.  If a PRT is deleted,
  70 //      its _coarse_map bit is set, so the entry that we were attempting to add
  71 //      is represented.  If a deleted PRT is re-used, a thread adding a bit,
  72 //      thinking the PRT is for a different region, does no harm.
  73 
  74 class OtherRegionsTable VALUE_OBJ_CLASS_SPEC {
  75   friend class HeapRegionRemSetIterator;
  76 
  77   G1CollectedHeap* _g1h;
  78   Mutex*           _m;
  79   HeapRegion*      _hr;
  80 
  81   // These are protected by "_m".
  82   bool        _coarse_dirty;
  83   BitMap      _coarse_map;
  84   size_t      _n_coarse_entries;
  85   static jint _n_coarsenings;
  86 
  87   PerRegionTable** _fine_grain_regions;
  88   size_t           _n_fine_entries;
  89 
  90   // The fine grain remembered sets are doubly linked together using
  91   // their 'next' and 'prev' fields.
  92   // This allows fast bulk freeing of all the fine grain remembered
  93   // set entries, and fast finding of all of them without iterating
  94   // over the _fine_grain_regions table.
  95   PerRegionTable * _first_all_fine_prts;
  96   PerRegionTable * _last_all_fine_prts;
  97 
  98   // Used to sample a subset of the fine grain PRTs to determine which
  99   // PRT to evict and coarsen.
 100   size_t        _fine_eviction_start;
 101   static size_t _fine_eviction_stride;
 102   static size_t _fine_eviction_sample_size;
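The lookup protocol described in the comment at the top of this class
(readers scan without the lock; a failed lookup must be retried under
_m) has the shape sketched below. This is a simplified illustration,
not HotSpot code: std::mutex stands in for the Mutex, find_or_add is an
invented helper, and std::unordered_map is not actually safe for
lock-free reads the way the real hand-rolled bucket lists are.

  #include <mutex>
  #include <unordered_map>

  struct PerRegionTable { int region_idx; };

  class FineGrainTable {
    std::mutex _m;                 // protects insertion and deletion
    std::unordered_map<int, PerRegionTable*> _buckets;

    PerRegionTable* find_prt(int region_idx) {
      // Lock-free in the real code; a miss here may just mean a
      // concurrent writer has not finished inserting yet.
      auto it = _buckets.find(region_idx);
      return it == _buckets.end() ? nullptr : it->second;
    }

  public:
    PerRegionTable* find_or_add(int region_idx) {
      // Fast path: optimistic lookup without the lock.
      PerRegionTable* prt = find_prt(region_idx);
      if (prt != nullptr) return prt;

      // Slow path: a failed lookup is only authoritative under the
      // lock, so retry before concluding the PRT is really absent.
      std::lock_guard<std::mutex> guard(_m);
      prt = find_prt(region_idx);
      if (prt == nullptr) {
        prt = new PerRegionTable{region_idx};
        _buckets[region_idx] = prt;
      }
      return prt;
    }
  };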

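The _first_all_fine_prts/_last_all_fine_prts pair threads every
fine-grain PRT onto one doubly linked list, so bulk freeing walks that
list instead of iterating over the sparse _fine_grain_regions table.
A self-contained sketch of the linking and the bulk free; the helper
names (link_tail, free_all) are invented, and the real code reuses
PRTs rather than deleting them outright.

  #include <cstddef>

  // Intrusive links; per the comment above, the real fields are
  // called 'prev' and 'next'.
  struct PerRegionTable {
    PerRegionTable* prev;
    PerRegionTable* next;
  };

  struct AllFinePrts {
    PerRegionTable* _first_all_fine_prts = nullptr;
    PerRegionTable* _last_all_fine_prts  = nullptr;

    void link_tail(PerRegionTable* prt) {
      prt->prev = _last_all_fine_prts;
      prt->next = nullptr;
      if (_last_all_fine_prts != nullptr) {
        _last_all_fine_prts->next = prt;
      } else {
        _first_all_fine_prts = prt;
      }
      _last_all_fine_prts = prt;
    }

    // Bulk free: one linear walk, no hash-table iteration.
    void free_all() {
      for (PerRegionTable* cur = _first_all_fine_prts; cur != nullptr; ) {
        PerRegionTable* next = cur->next;
        delete cur;
        cur = next;
      }
      _first_all_fine_prts = nullptr;
      _last_all_fine_prts  = nullptr;
    }
  };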

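Finally, _fine_eviction_start, _fine_eviction_stride and
_fine_eviction_sample_size implement a cheap eviction policy: when the
fine table is full, only a strided sample of buckets is inspected for a
PRT to coarsen, and the start index advances so successive evictions
probe different buckets. The sketch below keeps that control flow;
occupied, pick_victim and the constant values are assumptions, and the
victim choice (the fullest PRT, which trades the most fine entries for
a single coarse bit) follows the stated purpose of these fields rather
than quoting the implementation.

  #include <cstddef>
  #include <vector>

  struct PerRegionTable { size_t occupied; };  // entry count, invented

  struct FineEviction {
    size_t _fine_eviction_start = 0;
    // Illustrative values; HotSpot computes its own.
    static const size_t _fine_eviction_stride = 7;
    static const size_t _fine_eviction_sample_size = 4;

    // Examine a strided sample of buckets and return the fullest PRT:
    // evicting it frees the most fine entries for one coarse bit.
    PerRegionTable* pick_victim(const std::vector<PerRegionTable*>& table) {
      const size_t n = table.size();
      if (n == 0) return nullptr;
      PerRegionTable* victim = nullptr;
      size_t idx = _fine_eviction_start;
      for (size_t i = 0; i < _fine_eviction_sample_size; i++) {
        PerRegionTable* cand = table[idx % n];
        if (cand != nullptr &&
            (victim == nullptr || cand->occupied > victim->occupied)) {
          victim = cand;
        }
        idx += _fine_eviction_stride;
      }
      // Advance so the next eviction samples different buckets.
      _fine_eviction_start = (_fine_eviction_start + 1) % n;
      return victim;
    }
  };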