1 /*
   2  * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_HPP
  26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_HPP
  27 
  28 // A G1RemSet provides ways of iterating over pointers into a selected
  29 // collection set.
  30 
  31 class G1CollectedHeap;
  32 class CardTableModRefBarrierSet;
  33 class ConcurrentG1Refine;
  34 
  35 // A G1RemSet in which each heap region has a rem set that records the
  36 // external heap references into it.  Uses a mod ref bs to track updates,
  37 // so that they can be used to update the individual region remsets.
  38 
class G1RemSet: public CHeapObj {
protected:
  G1CollectedHeap* _g1;
  // Count of cards refined concurrently (i.e. outside a GC pause);
  // reported via print_summary_info().
  unsigned _conc_refine_cards;
  size_t n_workers();

protected:
  enum SomePrivateConstants {
    UpdateRStoMergeSync  = 0,
    MergeRStoDoDirtySync = 1,
    DoDirtySync          = 2,
    LastSync             = 3,

    SeqTask              = 0,
    NumSeqTasks          = 1
  };

  CardTableModRefBS*             _ct_bs;
  SubTasksDone*                  _seq_task;
  G1CollectorPolicy* _g1p;

  ConcurrentG1Refine* _cg1r;

  // Per-worker card-scan counts (summed into _total_cards_scanned;
  // exposed via cardsScanned()).
  size_t*             _cards_scanned;
  size_t              _total_cards_scanned;

  // _traversal_in_progress is "true" iff a traversal is in progress.

  bool _traversal_in_progress;
  void set_traversal(bool b) { _traversal_in_progress = b; }

  // Used for caching the closure that is responsible for scanning
  // references into the collection set.
  OopsInHeapRegionClosure** _cset_rs_update_cl;

  // The routine that performs the actual work of refining a dirty
  // card.
  // If check_for_refs_into_cset is true then a true result is returned
  // if the card contains oops that have references into the current
  // collection set.
  bool concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
                                    bool check_for_refs_into_cset);

protected:
  // Non-virtual workhorses behind the public write_ref/par_write_ref
  // overloads, templatified for narrow vs wide oop.
  template <class T> void write_ref_nv(HeapRegion* from, T* p);
  template <class T> void par_write_ref_nv(HeapRegion* from, T* p, int tid);

public:
  // This is called to reset dual hash tables after the gc pause
  // is finished and the initial hash table is no longer being
  // scanned.
  void cleanupHRRS();

  G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs);
  ~G1RemSet();

  // Invoke "blk->do_oop" on all pointers into the CS in objects in regions
  // outside the CS (having invoked "blk->set_region" to set the "from"
  // region correctly beforehand.) The "worker_i" param is for the
  // parallel case where the number of the worker thread calling this
  // function can be helpful in partitioning the work to be done. It
  // should be the same as the "i" passed to the calling thread's
  // work(i) function. In the sequential case this param will be ignored.
  void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
                                   int worker_i);

  // Prepare for and cleanup after an oops_into_collection_set_do
  // call.  Must call each of these once before and after (in sequential
  // code) any threads call oops_into_collection_set_do.  (This offers an
  // opportunity to sequential setup and teardown of structures needed by a
  // parallel iteration over the CS's RS.)
  void prepare_for_oops_into_collection_set_do();
  void cleanup_after_oops_into_collection_set_do();

  // Scan the collection set's remembered sets / update the remembered
  // sets from the dirty card queues; "worker_i" identifies the calling
  // worker thread (see oops_into_collection_set_do above).
  void scanRS(OopsInHeapRegionClosure* oc, int worker_i);
  void updateRS(DirtyCardQueue* into_cset_dcq, int worker_i);

  HeapRegion* calculateStartRegion(int i);

  CardTableModRefBS* ct_bs() { return _ct_bs; }
  size_t cardsScanned() { return _total_cards_scanned; }

  // Record, if necessary, the fact that *p (where "p" is in region "from",
  // which is required to be non-NULL) has changed to a new non-NULL value.
  // [Below the virtual version calls a non-virtual protected
  // workhorse that is templatified for narrow vs wide oop.]
  inline void write_ref(HeapRegion* from, oop* p) {
    write_ref_nv(from, p);
  }
  inline void write_ref(HeapRegion* from, narrowOop* p) {
    write_ref_nv(from, p);
  }
  inline void par_write_ref(HeapRegion* from, oop* p, int tid) {
    par_write_ref_nv(from, p, tid);
  }
  inline void par_write_ref(HeapRegion* from, narrowOop* p, int tid) {
    par_write_ref_nv(from, p, tid);
  }

  bool self_forwarded(oop obj);

  // Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region
  // or card, respectively, such that a region or card with a corresponding
  // 0 bit contains no part of any live object.  Eliminates any remembered
  // set entries that correspond to dead heap ranges.
  void scrub(BitMap* region_bm, BitMap* card_bm);

  // Like the above, but assumes is called in parallel: "worker_num" is the
  // parallel thread id of the current thread, and "claim_val" is the
  // value that should be used to claim heap regions.
  void scrub_par(BitMap* region_bm, BitMap* card_bm,
                 int worker_num, int claim_val);

  // Refine the card corresponding to "card_ptr".  If "sts" is non-NULL,
  // join and leave around parts that must be atomic wrt GC.  (NULL means
  // being done at a safepoint.)
  // If check_for_refs_into_cset is true, a true result is returned
  // if the given card contains oops that have references into the
  // current collection set.
  virtual bool concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
                                       bool check_for_refs_into_cset);

  // Print any relevant summary info.
  virtual void print_summary_info();

  // Prepare remembered set for verification.
  virtual void prepare_for_verify();
};
 167 
 168 #define G1_REM_SET_LOGGING 0
 169 
 170 class CountNonCleanMemRegionClosure: public MemRegionClosure {
 171   G1CollectedHeap* _g1;
 172   int _n;
 173   HeapWord* _start_first;
 174 public:
 175   CountNonCleanMemRegionClosure(G1CollectedHeap* g1) :
 176     _g1(g1), _n(0), _start_first(NULL)
 177   {}
 178   void do_MemRegion(MemRegion mr);
 179   int n() { return _n; };
 180   HeapWord* start_first() { return _start_first; }
 181 };
 182 
 183 class UpdateRSOopClosure: public OopClosure {
 184   HeapRegion* _from;
 185   G1RemSet* _rs;
 186   int _worker_i;
 187 
 188   template <class T> void do_oop_work(T* p);
 189 
 190 public:
 191   UpdateRSOopClosure(G1RemSet* rs, int worker_i = 0) :
 192     _from(NULL), _rs(rs), _worker_i(worker_i) {
 193     guarantee(_rs != NULL, "Requires an HRIntoG1RemSet");
 194   }
 195 
 196   void set_from(HeapRegion* from) {
 197     assert(from != NULL, "from region must be non-NULL");
 198     _from = from;
 199   }
 200 
 201   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
 202   virtual void do_oop(oop* p)       { do_oop_work(p); }
 203 
 204   // Override: this closure is idempotent.
 205   //  bool idempotent() { return true; }
 206   bool apply_to_weak_ref_discovered_field() { return true; }
 207 };
 208 
 209 class UpdateRSetImmediate: public OopsInHeapRegionClosure {
 210 private:
 211   G1RemSet* _g1_rem_set;
 212 
 213   template <class T> void do_oop_work(T* p);
 214 public:
 215   UpdateRSetImmediate(G1RemSet* rs) :
 216     _g1_rem_set(rs) {}
 217 
 218   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
 219   virtual void do_oop(      oop* p) { do_oop_work(p); }
 220 };
 221 
 222 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_HPP