1 /* 2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_HPP 26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_HPP 27 28 // A G1RemSet provides ways of iterating over pointers into a selected 29 // collection set. 30 31 class G1CollectedHeap; 32 class CardTableModRefBarrierSet; 33 class ConcurrentG1Refine; 34 35 // A G1RemSet in which each heap region has a rem set that records the 36 // external heap references into it. Uses a mod ref bs to track updates, 37 // so that they can be used to update the individual region remsets. 
class G1RemSet: public CHeapObj<mtGC> {
protected:
  G1CollectedHeap* _g1;
  // Count of cards refined concurrently (incremented by the refinement
  // path; see concurrentRefineOneCard* below).
  unsigned _conc_refine_cards;
  uint n_workers();

protected:
  // Sub-task ids used with _seq_task to partition/synchronize phases of
  // the remembered-set work.
  enum SomePrivateConstants {
    UpdateRStoMergeSync  = 0,
    MergeRStoDoDirtySync = 1,
    DoDirtySync          = 2,
    LastSync             = 3,

    SeqTask              = 0,
    NumSeqTasks          = 1
  };

  CardTableModRefBS*     _ct_bs;      // the heap's card-table barrier set
  SubTasksDone*          _seq_task;
  G1CollectorPolicy*     _g1p;

  ConcurrentG1Refine*    _cg1r;

  // Per-worker card-scan counts and their total (reported by cardsScanned()).
  size_t*                _cards_scanned;
  size_t                 _total_cards_scanned;

  // Used for caching the closure that is responsible for scanning
  // references into the collection set.
  OopsInHeapRegionClosure** _cset_rs_update_cl;

  // The routine that performs the actual work of refining a dirty
  // card.
  // If check_for_refs_into_cset is true then a true result is returned
  // if the card contains oops that have references into the current
  // collection set.
  bool concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
                                    bool check_for_refs_into_cset);

public:
  // This is called to reset dual hash tables after the gc pause
  // is finished and the initial hash table is no longer being
  // scanned.
  void cleanupHRRS();

  G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs);
  ~G1RemSet();

  // Invoke "blk->do_oop" on all pointers into the CS in objects in regions
  // outside the CS (having invoked "blk->set_region" to set the "from"
  // region correctly beforehand.) The "worker_i" param is for the
  // parallel case where the number of the worker thread calling this
  // function can be helpful in partitioning the work to be done. It
  // should be the same as the "i" passed to the calling thread's
  // work(i) function. In the sequential case this param will be ignored.
  void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
                                   int worker_i);

  // Prepare for and cleanup after an oops_into_collection_set_do
  // call. Must call each of these once before and after (in sequential
  // code) any threads call oops_into_collection_set_do. (This offers an
  // opportunity to sequential setup and teardown of structures needed by a
  // parallel iteration over the CS's RS.)
  void prepare_for_oops_into_collection_set_do();
  void cleanup_after_oops_into_collection_set_do();

  // Sub-phases of oops_into_collection_set_do, callable per worker
  // (bodies in the .cpp): presumably scanRS walks the CS regions' rem
  // sets and updateRS processes buffered dirty cards — confirm in the
  // implementation file.
  void scanRS(OopsInHeapRegionClosure* oc, int worker_i);
  void updateRS(DirtyCardQueue* into_cset_dcq, int worker_i);

  CardTableModRefBS* ct_bs() { return _ct_bs; }
  size_t cardsScanned() { return _total_cards_scanned; }

  // Record, if necessary, the fact that *p (where "p" is in region "from",
  // which is required to be non-NULL) has changed to a new non-NULL value.
  template <class T> void write_ref(HeapRegion* from, T* p);
  template <class T> void par_write_ref(HeapRegion* from, T* p, int tid);

  // Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region
  // or card, respectively, such that a region or card with a corresponding
  // 0 bit contains no part of any live object. Eliminates any remembered
  // set entries that correspond to dead heap ranges.
  void scrub(BitMap* region_bm, BitMap* card_bm);

  // Like the above, but assumes is called in parallel: "worker_num" is the
  // parallel thread id of the current thread, and "claim_val" is the
  // value that should be used to claim heap regions.
  void scrub_par(BitMap* region_bm, BitMap* card_bm,
                 uint worker_num, int claim_val);

  // Refine the card corresponding to "card_ptr". If "sts" is non-NULL,
  // join and leave around parts that must be atomic wrt GC. (NULL means
  // being done at a safepoint.)
  // If check_for_refs_into_cset is true, a true result is returned
  // if the given card contains oops that have references into the
  // current collection set.
  virtual bool concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
                                       bool check_for_refs_into_cset);

  // Print any relevant summary info.
  virtual void print_summary_info();

  // Prepare remembered set for verification.
  virtual void prepare_for_verify();
};

// Records a count (_n) and the start of the first region seen
// (_start_first) while iterating MemRegions; named for counting
// non-clean regions — do_MemRegion's body is in the .cpp, so the exact
// filtering criterion should be confirmed there.
class CountNonCleanMemRegionClosure: public MemRegionClosure {
  G1CollectedHeap* _g1;
  int _n;
  HeapWord* _start_first;
public:
  CountNonCleanMemRegionClosure(G1CollectedHeap* g1) :
    _g1(g1), _n(0), _start_first(NULL)
  {}
  void do_MemRegion(MemRegion mr);
  int n() { return _n; };
  HeapWord* start_first() { return _start_first; }
};

// Oop closure that forwards each visited reference to a G1RemSet
// (via do_oop_work, defined in the .cpp) tagged with the current "from"
// region and worker id. set_from must be called with a non-NULL region
// before use.
class UpdateRSOopClosure: public ExtendedOopClosure {
  HeapRegion* _from;
  G1RemSet* _rs;
  int _worker_i;

  template <class T> void do_oop_work(T* p);

public:
  UpdateRSOopClosure(G1RemSet* rs, int worker_i = 0) :
    _from(NULL), _rs(rs), _worker_i(worker_i)
  {}

  void set_from(HeapRegion* from) {
    assert(from != NULL, "from region must be non-NULL");
    _from = from;
  }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(oop* p) { do_oop_work(p); }

  // Override: this closure is idempotent.
  // bool idempotent() { return true; }
  bool apply_to_weak_ref_discovered_field() { return true; }
};

// Oops-in-heap-region closure that applies rem-set updates through the
// given G1RemSet (do_oop_work defined in the .cpp); "immediate" by name,
// i.e. presumably without buffering — confirm in the implementation.
class UpdateRSetImmediate: public OopsInHeapRegionClosure {
private:
  G1RemSet* _g1_rem_set;

  template <class T> void do_oop_work(T* p);
public:
  UpdateRSetImmediate(G1RemSet* rs) :
    _g1_rem_set(rs) {}

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop( oop* p) { do_oop_work(p); }
};


#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_HPP