src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp

Print this page
rev 4803 : imported patch thomas-comments-2


  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONREMSET_HPP
  26 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONREMSET_HPP
  27 
  28 #include "gc_implementation/g1/sparsePRT.hpp"
  29 
  30 // Remembered set for a heap region.  Represent a set of "cards" that
  31 // contain pointers into the owner heap region.  Cards are defined somewhat
  32 // abstractly, in terms of what the "BlockOffsetTable" in use can parse.
  33 
  34 class G1CollectedHeap;
  35 class G1BlockOffsetSharedArray;
  36 class HeapRegion;
  37 class HeapRegionRemSetIterator;
  38 class PerRegionTable;
  39 class SparsePRT;

  40 
// Essentially a wrapper around SparsePRTCleanupTask. See
// sparsePRT.hpp for more details.
// Empty subclass: adds no members of its own and inherits all behavior
// from SparsePRTCleanupTask.
class HRRSCleanupTask : public SparsePRTCleanupTask {
};
  45 
  46 // The "_coarse_map" is a bitmap with one bit for each region, where set
  47 // bits indicate that the corresponding region may contain some pointer
  48 // into the owning region.
  49 
  50 // The "_fine_grain_entries" array is an open hash table of PerRegionTables
  51 // (PRTs), indicating regions for which we're keeping the RS as a set of
  52 // cards.  The strategy is to cap the size of the fine-grain table,
  53 // deleting an entry and setting the corresponding coarse-grained bit when
  54 // we would overflow this cap.
  55 
  56 // We use a mixture of locking and lock-free techniques here.  We allow
  57 // threads to locate PRTs without locking, but threads attempting to alter
  58 // a bucket list obtain a lock.  This means that any failing attempt to
  59 // find a PRT must be retried with the lock.  It might seem dangerous that


 174   // Declares that only regions i s.t. 0 <= i < new_n_regs are in use.
 175   // Make sure any entries for higher regions are invalid.
 176   static void shrink_from_card_cache(size_t new_n_regs);
 177 
 178   static void print_from_card_cache();
 179 };
 180 
 181 class HeapRegionRemSet : public CHeapObj<mtGC> {
 182   friend class VMStructs;
 183   friend class HeapRegionRemSetIterator;
 184 
 185 public:
 186   enum Event {
 187     Event_EvacStart, Event_EvacEnd, Event_RSUpdateEnd
 188   };
 189 
 190 private:
 191   G1BlockOffsetSharedArray* _bosa;
 192   G1BlockOffsetSharedArray* bosa() const { return _bosa; }
 193 




 194   OtherRegionsTable _other_regions;
 195 
 196   enum ParIterState { Unclaimed, Claimed, Complete };
 197   volatile ParIterState _iter_state;
 198   volatile jlong _iter_claimed;
 199 
 200   // Unused unless G1RecordHRRSOops is true.
 201 
 202   static const int MaxRecorded = 1000000;
 203   static OopOrNarrowOopStar* _recorded_oops;
 204   static HeapWord**          _recorded_cards;
 205   static HeapRegion**        _recorded_regions;
 206   static int                 _n_recorded;
 207 
 208   static const int MaxRecordedEvents = 1000;
 209   static Event*       _recorded_events;
 210   static int*         _recorded_event_index;
 211   static int          _n_recorded_events;
 212 
 213   static void print_event(outputStream* str, Event evnt);


 265   bool iter_is_complete();
 266 
  // Support for claiming blocks of cards during iteration.
  // Current claim watermark (reads the volatile jlong counter and
  // narrows it to size_t).
  size_t iter_claimed() const { return (size_t)_iter_claimed; }
  // Claim the next block of cards.
  // Lock-free: atomically advances _iter_claimed by 'step' with a CAS
  // retry loop and returns the start index of the block claimed by this
  // caller.
  size_t iter_claimed_next(size_t step) {
    size_t current, next;
    do {
      current = iter_claimed();
      next = current + step;
      // Retry if another thread advanced _iter_claimed between the read
      // and the cmpxchg (the loop condition relies on cmpxchg returning
      // the value it observed).
    } while (Atomic::cmpxchg((jlong)next, &_iter_claimed, (jlong)current) != (jlong)current);
    return current;
  }
 278   void reset_for_par_iteration();
 279 
 280   bool verify_ready_for_par_iteration() {
 281     return (_iter_state == Unclaimed) && (_iter_claimed == 0);
 282   }
 283 
 284   // The actual # of bytes this hr_remset takes up.

 285   size_t mem_size() {
 286     return _other_regions.mem_size()
 287       // This correction is necessary because the above includes the second
 288       // part.
 289       + sizeof(this) - sizeof(OtherRegionsTable);

 290   }
 291 
  // Returns the memory occupancy of all static data structures associated
  // with remembered sets (delegates to OtherRegionsTable).
  static size_t static_mem_size() {
    return OtherRegionsTable::static_mem_size();
  }
 297 
  // Returns the memory occupancy of all free_list data structures associated
  // with remembered sets (delegates to OtherRegionsTable).
  static size_t fl_mem_size() {
    return OtherRegionsTable::fl_mem_size();
  }
 303 
  // Returns whether this remembered set records a pointer from location
  // 'from' into the owning region (delegates to _other_regions).
  bool contains_reference(OopOrNarrowOopStar from) const {
    return _other_regions.contains_reference(from);
  }































 307   void print() const;
 308 
 309   // Called during a stop-world phase to perform any deferred cleanups.
 310   static void cleanup();
 311 
  // Declare the heap size (in # of regions) to the HeapRegionRemSet(s).
  // (Uses it to initialize from_card_cache).
  static void init_heap(uint max_regions) {
    OtherRegionsTable::init_from_card_cache((size_t) max_regions);
  }
 317 
  // Declares that only regions i s.t. 0 <= i < new_n_regs are in use.
  // Entries in the from_card_cache for regions >= new_n_regs are
  // invalidated (see shrink_from_card_cache).
  static void shrink_heap(uint new_n_regs) {
    OtherRegionsTable::shrink_from_card_cache((size_t) new_n_regs);
  }
 322 
 323 #ifndef PRODUCT
  // Non-product builds only: dump the from_card_cache contents
  // (delegates to OtherRegionsTable).
  static void print_from_card_cache() {
    OtherRegionsTable::print_from_card_cache();
  }




  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONREMSET_HPP
  26 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONREMSET_HPP
  27 
  28 #include "gc_implementation/g1/sparsePRT.hpp"
  29 
  30 // Remembered set for a heap region.  Represent a set of "cards" that
  31 // contain pointers into the owner heap region.  Cards are defined somewhat
  32 // abstractly, in terms of what the "BlockOffsetTable" in use can parse.
  33 
  34 class G1CollectedHeap;
  35 class G1BlockOffsetSharedArray;
  36 class HeapRegion;
  37 class HeapRegionRemSetIterator;
  38 class PerRegionTable;
  39 class SparsePRT;
  40 class nmethod;
  41 
// Essentially a wrapper around SparsePRTCleanupTask. See
// sparsePRT.hpp for more details.
// Empty subclass: adds no members of its own and inherits all behavior
// from SparsePRTCleanupTask.
class HRRSCleanupTask : public SparsePRTCleanupTask {
};
  46 
  47 // The "_coarse_map" is a bitmap with one bit for each region, where set
  48 // bits indicate that the corresponding region may contain some pointer
  49 // into the owning region.
  50 
  51 // The "_fine_grain_entries" array is an open hash table of PerRegionTables
  52 // (PRTs), indicating regions for which we're keeping the RS as a set of
  53 // cards.  The strategy is to cap the size of the fine-grain table,
  54 // deleting an entry and setting the corresponding coarse-grained bit when
  55 // we would overflow this cap.
  56 
  57 // We use a mixture of locking and lock-free techniques here.  We allow
  58 // threads to locate PRTs without locking, but threads attempting to alter
  59 // a bucket list obtain a lock.  This means that any failing attempt to
  60 // find a PRT must be retried with the lock.  It might seem dangerous that


 175   // Declares that only regions i s.t. 0 <= i < new_n_regs are in use.
 176   // Make sure any entries for higher regions are invalid.
 177   static void shrink_from_card_cache(size_t new_n_regs);
 178 
 179   static void print_from_card_cache();
 180 };
 181 
 182 class HeapRegionRemSet : public CHeapObj<mtGC> {
 183   friend class VMStructs;
 184   friend class HeapRegionRemSetIterator;
 185 
 186 public:
 187   enum Event {
 188     Event_EvacStart, Event_EvacEnd, Event_RSUpdateEnd
 189   };
 190 
 191 private:
 192   G1BlockOffsetSharedArray* _bosa;
 193   G1BlockOffsetSharedArray* bosa() const { return _bosa; }
 194 
 195   // A list of code blobs (nmethods) whose code contains pointers into
 196   // the region that owns this RSet.
 197   GrowableArray<nmethod*>* _strong_code_roots_list;
 198 
 199   OtherRegionsTable _other_regions;
 200 
 201   enum ParIterState { Unclaimed, Claimed, Complete };
 202   volatile ParIterState _iter_state;
 203   volatile jlong _iter_claimed;
 204 
 205   // Unused unless G1RecordHRRSOops is true.
 206 
 207   static const int MaxRecorded = 1000000;
 208   static OopOrNarrowOopStar* _recorded_oops;
 209   static HeapWord**          _recorded_cards;
 210   static HeapRegion**        _recorded_regions;
 211   static int                 _n_recorded;
 212 
 213   static const int MaxRecordedEvents = 1000;
 214   static Event*       _recorded_events;
 215   static int*         _recorded_event_index;
 216   static int          _n_recorded_events;
 217 
 218   static void print_event(outputStream* str, Event evnt);


 270   bool iter_is_complete();
 271 
  // Support for claiming blocks of cards during iteration.
  // Current claim watermark (reads the volatile jlong counter and
  // narrows it to size_t).
  size_t iter_claimed() const { return (size_t)_iter_claimed; }
  // Claim the next block of cards.
  // Lock-free: atomically advances _iter_claimed by 'step' with a CAS
  // retry loop and returns the start index of the block claimed by this
  // caller.
  size_t iter_claimed_next(size_t step) {
    size_t current, next;
    do {
      current = iter_claimed();
      next = current + step;
      // Retry if another thread advanced _iter_claimed between the read
      // and the cmpxchg (the loop condition relies on cmpxchg returning
      // the value it observed).
    } while (Atomic::cmpxchg((jlong)next, &_iter_claimed, (jlong)current) != (jlong)current);
    return current;
  }
 283   void reset_for_par_iteration();
 284 
 285   bool verify_ready_for_par_iteration() {
 286     return (_iter_state == Unclaimed) && (_iter_claimed == 0);
 287   }
 288 
 289   // The actual # of bytes this hr_remset takes up.
 290   // Note also includes the strong code root set.
 291   size_t mem_size() {
 292     return _other_regions.mem_size()
 293       // This correction is necessary because the above includes the second
 294       // part.
 295       + (sizeof(this) - sizeof(OtherRegionsTable))
 296       + strong_code_roots_mem_size();
 297   }
 298 
  // Returns the memory occupancy of all static data structures associated
  // with remembered sets (delegates to OtherRegionsTable).
  static size_t static_mem_size() {
    return OtherRegionsTable::static_mem_size();
  }
 304 
  // Returns the memory occupancy of all free_list data structures associated
  // with remembered sets (delegates to OtherRegionsTable).
  static size_t fl_mem_size() {
    return OtherRegionsTable::fl_mem_size();
  }
 310 
  // Returns whether this remembered set records a pointer from location
  // 'from' into the owning region (delegates to _other_regions).
  bool contains_reference(OopOrNarrowOopStar from) const {
    return _other_regions.contains_reference(from);
  }
 314 
 315   // Routines for managing the list of code roots that point into
 316   // the heap region that owns this RSet.
 317   void add_strong_code_root(nmethod* nm);
 318   void remove_strong_code_root(nmethod* nm);
 319 
 320   // During a collection, migrate the successfully evacuated strong
 321   // code roots that referenced into the region that owns this RSet
 322   // to the RSets of the new regions that they now point into.
 323   // Unsuccessfully evacuated code roots are not migrated.
 324   void migrate_strong_code_roots();
 325 
 326   // Applies blk->do_code_blob() to each of the entries in
 327   // the strong code roots list
 328   void strong_code_roots_do(CodeBlobClosure* blk) const;
 329 
  // Returns the number of elements in the strong code roots list.
  // NOTE(review): assumes _strong_code_roots_list has been allocated
  // (non-NULL) by the time this is called -- confirm against the ctor.
  int strong_code_roots_list_length() {
    return _strong_code_roots_list->length();
  }
 334 
  // Returns true if the strong code roots list contains the given
  // nmethod.
  bool strong_code_roots_list_contains(nmethod* nm) {
    return _strong_code_roots_list->contains(nm);
  }
 340 
 341   // Returns the amount of memory, in bytes, currently
 342   // consumed by the strong code roots.
 343   size_t strong_code_roots_mem_size();
 344 
 345   void print() const;
 346 
 347   // Called during a stop-world phase to perform any deferred cleanups.
 348   static void cleanup();
 349 
  // Declare the heap size (in # of regions) to the HeapRegionRemSet(s).
  // (Uses it to initialize from_card_cache).
  static void init_heap(uint max_regions) {
    OtherRegionsTable::init_from_card_cache((size_t) max_regions);
  }
 355 
  // Declares that only regions i s.t. 0 <= i < new_n_regs are in use.
  // Entries in the from_card_cache for regions >= new_n_regs are
  // invalidated (see shrink_from_card_cache).
  static void shrink_heap(uint new_n_regs) {
    OtherRegionsTable::shrink_from_card_cache((size_t) new_n_regs);
  }
 360 
 361 #ifndef PRODUCT
  // Non-product builds only: dump the from_card_cache contents
  // (delegates to OtherRegionsTable).
  static void print_from_card_cache() {
    OtherRegionsTable::print_from_card_cache();
  }