src/share/vm/gc_implementation/g1/g1RemSet.hpp

rev 4802 : imported patch optimize-nmethod-scanning


  64   ConcurrentG1Refine*    _cg1r;
  65 
  66   size_t*                _cards_scanned;
  67   size_t                 _total_cards_scanned;
  68 
  69   // Used for caching the closure that is responsible for scanning
  70   // references into the collection set.
  71   OopsInHeapRegionClosure** _cset_rs_update_cl;
  72 
  73   // Print the given summary info
  74   virtual void print_summary_info(G1RemSetSummary* summary, const char* header = NULL);
  75 public:
  76   // This is called to reset dual hash tables after the gc pause
  77   // is finished and the initial hash table is no longer being
  78   // scanned.
  79   void cleanupHRRS();
  80 
  81   G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs);
  82   ~G1RemSet();
  83 
  84   // Invoke "blk->do_oop" on all pointers into the CS in objects in regions
  85   // outside the CS (having invoked "blk->set_region" to set the "from"
  86   // region correctly beforehand.) The "worker_i" param is for the
  87   // parallel case where the number of the worker thread calling this
  88   // function can be helpful in partitioning the work to be done. It
  89   // should be the same as the "i" passed to the calling thread's
  90   // work(i) function. In the sequential case this param will be ignored.
  91   void oops_into_collection_set_do(OopsInHeapRegionClosure* blk, int worker_i);
  92 
  93   // Prepare for and cleanup after an oops_into_collection_set_do
  94   // call.  Must call each of these once before and after (in sequential
  95   // code) any threads call oops_into_collection_set_do.  (This offers an
  96   // opportunity for sequential setup and teardown of structures needed by a
  97   // parallel iteration over the CS's RS.)
  98   void prepare_for_oops_into_collection_set_do();
  99   void cleanup_after_oops_into_collection_set_do();
 100 
 101   void scanRS(OopsInHeapRegionClosure* oc, int worker_i);
 102   void updateRS(DirtyCardQueue* into_cset_dcq, int worker_i);
 103 
 104   CardTableModRefBS* ct_bs() { return _ct_bs; }
 105   size_t cardsScanned() { return _total_cards_scanned; }
 106 
 107   // Record, if necessary, the fact that *p (where "p" is in region "from",
 108   // which is required to be non-NULL) has changed to a new non-NULL value.
 109   template <class T> void write_ref(HeapRegion* from, T* p);
 110   template <class T> void par_write_ref(HeapRegion* from, T* p, int tid);
 111 
 112   // Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region
 113   // or card, respectively, such that a region or card with a corresponding
 114   // 0 bit contains no part of any live object.  Eliminates any remembered
 115   // set entries that correspond to dead heap ranges.
 116   void scrub(BitMap* region_bm, BitMap* card_bm);
 117 
 118   // Like the above, but assumes it is called in parallel: "worker_num" is the
 119   // parallel thread id of the current thread, and "claim_val" is the
 120   // value that should be used to claim heap regions.
 121   void scrub_par(BitMap* region_bm, BitMap* card_bm,
 122                  uint worker_num, int claim_val);

  64   ConcurrentG1Refine*    _cg1r;
  65 
  66   size_t*                _cards_scanned;
  67   size_t                 _total_cards_scanned;
  68 
  69   // Used for caching the closure that is responsible for scanning
  70   // references into the collection set.
  71   OopsInHeapRegionClosure** _cset_rs_update_cl;
  72 
  73   // Print the given summary info
  74   virtual void print_summary_info(G1RemSetSummary* summary, const char* header = NULL);
  75 public:
  76   // This is called to reset dual hash tables after the gc pause
  77   // is finished and the initial hash table is no longer being
  78   // scanned.
  79   void cleanupHRRS();
  80 
  81   G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs);
  82   ~G1RemSet();
  83 
  84   // Invoke "blk->do_oop" on all pointers into the collection set
  85   // from objects in regions outside the collection set (having
  86   // invoked "blk->set_region" to set the "from" region correctly
  87   // beforehand.)
  88   //
  89   // Invoke code_root_cl->do_code_blob on the unmarked nmethods
  90   // on the strong code roots list for each region in the
  91   // collection set.
  92   //
  93   // The "worker_i" param is for the parallel case where the id
  94   // of the worker thread calling this function can be helpful in
  95   // partitioning the work to be done. It should be the same as
  96   // the "i" passed to the calling thread's work(i) function.
  97   // In the sequential case this param will be ignored.
  98   void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
  99                                    CodeBlobToOopClosure* code_root_cl,
 100                                    int worker_i);
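
A minimal sketch of the calling convention. The task and closure instances below are hypothetical; only oops_into_collection_set_do itself and the rule that worker_i must equal the id passed to work() come from the contract above.

// Hypothetical gang task illustrating the worker_i convention; not the
// actual G1 evacuation task.
class ScanCSetTask : public AbstractGangTask {
  G1RemSet*                _rs;
  OopsInHeapRegionClosure* _oop_cl;       // refs into the collection set
  CodeBlobToOopClosure*    _code_root_cl; // unmarked nmethods on the
                                          // strong code roots lists
public:
  ScanCSetTask(G1RemSet* rs, OopsInHeapRegionClosure* oc,
               CodeBlobToOopClosure* cc)
    : AbstractGangTask("Scan collection set"),
      _rs(rs), _oop_cl(oc), _code_root_cl(cc) { }

  void work(uint worker_id) {
    // Pass this worker's own id, exactly as handed to work().
    _rs->oops_into_collection_set_do(_oop_cl, _code_root_cl, (int) worker_id);
  }
};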
 101 
 102   // Prepare for and cleanup after an oops_into_collection_set_do
 103   // call.  Must call each of these once before and after (in sequential
 104   // code) any threads call oops_into_collection_set_do.  (This offers an
 105   // opportunity for sequential setup and teardown of structures needed by a
 106   // parallel iteration over the CS's RS.)
 107   void prepare_for_oops_into_collection_set_do();
 108   void cleanup_after_oops_into_collection_set_do();
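
The required bracketing, sketched under the assumption of a WorkGang-style dispatch; "workers" and "scan_cset_task" are placeholders:

// Sequential setup ...
rs->prepare_for_oops_into_collection_set_do();
// ... parallel phase: each worker calls oops_into_collection_set_do ...
workers->run_task(&scan_cset_task);
// ... sequential teardown.
rs->cleanup_after_oops_into_collection_set_do();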
 109 
 110   void scanRS(OopsInHeapRegionClosure* oc,
 111               CodeBlobToOopClosure* code_root_cl,
 112               int worker_i);
 113 
 114   void updateRS(DirtyCardQueue* into_cset_dcq, int worker_i);
 115 
 116   CardTableModRefBS* ct_bs() { return _ct_bs; }
 117   size_t cardsScanned() { return _total_cards_scanned; }
 118 
 119   // Record, if necessary, the fact that *p (where "p" is in region "from",
 120   // which is required to be non-NULL) has changed to a new non-NULL value.
 121   template <class T> void write_ref(HeapRegion* from, T* p);
 122   template <class T> void par_write_ref(HeapRegion* from, T* p, int tid);
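
A sketch of the intended call shape; the helper is hypothetical, and the non-NULL requirements are those stated in the comment above:

// Hypothetical helper: after storing a new non-NULL value at p, which
// lies in the non-NULL region "from", let the remembered set record the
// reference if necessary. par_write_ref is the variant safe for multiple
// threads; write_ref is for single-threaded callers.
template <class T>
void note_updated_ref(G1RemSet* rs, HeapRegion* from, T* p, int worker_i) {
  rs->par_write_ref(from, p, worker_i);
}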
 123 
 124   // Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region
 125   // or card, respectively, such that a region or card with a corresponding
 126   // 0 bit contains no part of any live object.  Eliminates any remembered
 127   // set entries that correspond to dead heap ranges.
 128   void scrub(BitMap* region_bm, BitMap* card_bm);
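
The bitmap contract in sketch form. The sizing names are illustrative, and in practice the bits are set during marking rather than by the caller:

// One bit per region / one bit per card; a clear bit asserts that the
// region or card holds no part of any live object.
BitMap region_bm(max_regions /* illustrative */, false /* in_resource_area */);
BitMap card_bm(max_cards /* illustrative */, false);
// ... marking sets the bits for regions/cards that may contain live data ...
rs->scrub(&region_bm, &card_bm);  // drops RS entries over dead heap ranges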
 129 
 130   // Like the above, but assumes it is called in parallel: "worker_num" is the
 131   // parallel thread id of the current thread, and "claim_val" is the
 132   // value that should be used to claim heap regions.
 133   void scrub_par(BitMap* region_bm, BitMap* card_bm,
 134                  uint worker_num, int claim_val);
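
A sketch of driving the parallel variant from a gang task. The task name is hypothetical; HeapRegion::ScrubRemSetClaimValue stands in for the claim value used for this phase, assuming the JDK 8-era sources:

// Hypothetical task; each worker passes its own id, and all workers use
// the same claim value so each region is scrubbed by exactly one of them.
class ScrubRSTask : public AbstractGangTask {
  G1RemSet* _rs;
  BitMap*   _region_bm;
  BitMap*   _card_bm;
public:
  ScrubRSTask(G1RemSet* rs, BitMap* rbm, BitMap* cbm)
    : AbstractGangTask("Scrub remembered sets"),
      _rs(rs), _region_bm(rbm), _card_bm(cbm) { }

  void work(uint worker_id) {
    _rs->scrub_par(_region_bm, _card_bm, worker_id,
                   HeapRegion::ScrubRemSetClaimValue);
  }
};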