1 /*
   2  * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // A G1RemSet provides ways of iterating over pointers into a selected
  26 // collection set.
  27 
  28 class G1CollectedHeap;
  29 class CardTableModRefBarrierSet;
  30 class ConcurrentG1Refine;
  31 
  32 // A G1RemSet in which each heap region has a rem set that records the
  33 // external heap references into it.  Uses a mod ref bs to track updates,
  34 // so that they can be used to update the individual region remsets.
  35 
class G1RemSet: public CHeapObj {
protected:
  G1CollectedHeap* _g1;
  // Count of cards refined concurrently (i.e. outside of a GC pause).
  unsigned _conc_refine_cards;
  // Number of worker threads participating in parallel rem-set work.
  size_t n_workers();

protected:
  // Constants naming the sub-task synchronization points and the
  // sequential-task partition used with _seq_task below.
  enum SomePrivateConstants {
    UpdateRStoMergeSync  = 0,
    MergeRStoDoDirtySync = 1,
    DoDirtySync          = 2,
    LastSync             = 3,

    SeqTask              = 0,
    NumSeqTasks          = 1
  };

  // Card table barrier set used to examine and clean dirty cards.
  CardTableModRefBS*             _ct_bs;
  SubTasksDone*                  _seq_task;
  G1CollectorPolicy* _g1p;

  ConcurrentG1Refine* _cg1r;

  // Per-worker counts of cards scanned by scanRS, and their sum
  // (published via cardsScanned() after a pause).
  size_t*             _cards_scanned;
  size_t              _total_cards_scanned;

  // _traversal_in_progress is "true" iff a traversal is in progress.

  bool _traversal_in_progress;
  void set_traversal(bool b) { _traversal_in_progress = b; }

  // Used for caching the closure that is responsible for scanning
  // references into the collection set.
  OopsInHeapRegionClosure** _cset_rs_update_cl;

  // The routine that performs the actual work of refining a dirty
  // card.
  // If check_for_refs_into_cset is true then a true result is returned
  // if the card contains oops that have references into the current
  // collection set.
  bool concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
                                    bool check_for_refs_into_cset);

protected:
  // Non-virtual workhorses for write_ref/par_write_ref, templatified
  // for narrow vs. wide oops.
  template <class T> void write_ref_nv(HeapRegion* from, T* p);
  template <class T> void par_write_ref_nv(HeapRegion* from, T* p, int tid);

public:
  // This is called to reset dual hash tables after the gc pause
  // is finished and the initial hash table is no longer being
  // scanned.
  void cleanupHRRS();

  G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs);
  ~G1RemSet();

  // Invoke "blk->do_oop" on all pointers into the CS in objects in regions
  // outside the CS (having invoked "blk->set_region" to set the "from"
  // region correctly beforehand.) The "worker_i" param is for the
  // parallel case where the number of the worker thread calling this
  // function can be helpful in partitioning the work to be done. It
  // should be the same as the "i" passed to the calling thread's
  // work(i) function. In the sequential case this param will be ignored.
  void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
                                   int worker_i);

  // Prepare for and cleanup after an oops_into_collection_set_do
  // call.  Must call each of these once before and after (in sequential
  // code) any threads call oops_into_collection_set_do.  (This offers an
  // opportunity to sequential setup and teardown of structures needed by a
  // parallel iteration over the CS's RS.)
  void prepare_for_oops_into_collection_set_do();
  void cleanup_after_oops_into_collection_set_do();

  // Scan the remembered sets of the collection-set regions, applying
  // "oc" to references found; "worker_i" identifies the calling worker.
  void scanRS(OopsInHeapRegionClosure* oc, int worker_i);
  // Apply pending card-table updates, enqueueing cards with references
  // into the collection set on "into_cset_dcq".
  void updateRS(DirtyCardQueue* into_cset_dcq, int worker_i);

  // Compute the collection-set region at which worker "i" should start
  // its scan (distributes start points across workers).
  HeapRegion* calculateStartRegion(int i);

  CardTableModRefBS* ct_bs() { return _ct_bs; }
  // Total cards scanned during the last collection pause.
  size_t cardsScanned() { return _total_cards_scanned; }

  // Record, if necessary, the fact that *p (where "p" is in region "from",
  // which is required to be non-NULL) has changed to a new non-NULL value.
  // [Below the virtual version calls a non-virtual protected
  // workhorse that is templatified for narrow vs wide oop.]
  inline void write_ref(HeapRegion* from, oop* p) {
    write_ref_nv(from, p);
  }
  inline void write_ref(HeapRegion* from, narrowOop* p) {
    write_ref_nv(from, p);
  }
  inline void par_write_ref(HeapRegion* from, oop* p, int tid) {
    par_write_ref_nv(from, p, tid);
  }
  inline void par_write_ref(HeapRegion* from, narrowOop* p, int tid) {
    par_write_ref_nv(from, p, tid);
  }

  // Returns "true" iff "obj" is self-forwarded (i.e. its forwarding
  // pointer points to itself, as happens on evacuation failure).
  // NOTE(review): semantics inferred from the name; implementation is
  // outside this header — confirm against the .cpp.
  bool self_forwarded(oop obj);

  // Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region
  // or card, respectively, such that a region or card with a corresponding
  // 0 bit contains no part of any live object.  Eliminates any remembered
  // set entries that correspond to dead heap ranges.
  void scrub(BitMap* region_bm, BitMap* card_bm);

  // Like the above, but assumes is called in parallel: "worker_num" is the
  // parallel thread id of the current thread, and "claim_val" is the
  // value that should be used to claim heap regions.
  void scrub_par(BitMap* region_bm, BitMap* card_bm,
                 int worker_num, int claim_val);

  // Refine the card corresponding to "card_ptr".  If "sts" is non-NULL,
  // join and leave around parts that must be atomic wrt GC.  (NULL means
  // being done at a safepoint.)
  // If check_for_refs_into_cset is true, a true result is returned
  // if the given card contains oops that have references into the
  // current collection set.
  virtual bool concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
                                       bool check_for_refs_into_cset);

  // Print any relevant summary info.
  virtual void print_summary_info();

  // Prepare remembered set for verification.
  virtual void prepare_for_verify();
};
 164 
 165 #define G1_REM_SET_LOGGING 0
 166 
 167 class CountNonCleanMemRegionClosure: public MemRegionClosure {
 168   G1CollectedHeap* _g1;
 169   int _n;
 170   HeapWord* _start_first;
 171 public:
 172   CountNonCleanMemRegionClosure(G1CollectedHeap* g1) :
 173     _g1(g1), _n(0), _start_first(NULL)
 174   {}
 175   void do_MemRegion(MemRegion mr);
 176   int n() { return _n; };
 177   HeapWord* start_first() { return _start_first; }
 178 };
 179 
 180 class UpdateRSOopClosure: public OopClosure {
 181   HeapRegion* _from;
 182   G1RemSet* _rs;
 183   int _worker_i;
 184 
 185   template <class T> void do_oop_work(T* p);
 186 
 187 public:
 188   UpdateRSOopClosure(G1RemSet* rs, int worker_i = 0) :
 189     _from(NULL), _rs(rs), _worker_i(worker_i) {
 190     guarantee(_rs != NULL, "Requires an HRIntoG1RemSet");
 191   }
 192 
 193   void set_from(HeapRegion* from) {
 194     assert(from != NULL, "from region must be non-NULL");
 195     _from = from;
 196   }
 197 
 198   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
 199   virtual void do_oop(oop* p)       { do_oop_work(p); }
 200 
 201   // Override: this closure is idempotent.
 202   //  bool idempotent() { return true; }
 203   bool apply_to_weak_ref_discovered_field() { return true; }
 204 };
 205 
// An OopsInHeapRegionClosure whose do_oop methods forward to the
// templatified workhorse do_oop_work.  The workhorse is defined outside
// this header; presumably it updates the remembered set immediately for
// each reference visited, as the name suggests — confirm in the .cpp.
class UpdateRSetImmediate: public OopsInHeapRegionClosure {
private:
  G1RemSet* _g1_rem_set;

  // Workhorse templatified for narrow vs. wide oops.
  template <class T> void do_oop_work(T* p);
public:
  UpdateRSetImmediate(G1RemSet* rs) :
    _g1_rem_set(rs) {}

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }
};