/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP

// Forward declarations; the full definitions live in other G1 headers.
class HeapRegion;
class G1CollectedHeap;
class G1RemSet;
class ConcurrentMark;
class DirtyCardToOopClosure;
class CMBitMap;
class CMMarkStack;
class G1ParScanThreadState;
class CMTask;
class ReferenceProcessor;

// A class that scans oops in a given heap region (much as OopsInGenClosure
// scans oops in a generation.)
class OopsInHeapRegionClosure: public ExtendedOopClosure {
protected:
  // The region the oops currently being applied come from; set by callers
  // via set_region() before the closure is applied.
  HeapRegion* _from;
public:
  void set_region(HeapRegion* from) { _from = from; }
};

// Common base for the per-GC-worker closures below: carries the heap,
// the per-thread scan state, and the worker id.
class G1ParClosureSuper : public OopsInHeapRegionClosure {
protected:
  G1CollectedHeap* _g1;
  G1ParScanThreadState* _par_scan_state;
  uint _worker_id;
public:
  // Initializes the instance, leaving _par_scan_state uninitialized. Must be done
  // later using the set_par_scan_thread_state() method.
  G1ParClosureSuper(G1CollectedHeap* g1);
  G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state);
  bool apply_to_weak_ref_discovered_field() { return true; }

  void set_par_scan_thread_state(G1ParScanThreadState* par_scan_state);
};

// NOTE(review): do_oop_nv is only declared here; presumably defined in the
// corresponding .inline.hpp file — confirm when editing.
class G1ParPushHeapRSClosure : public G1ParClosureSuper {
public:
  G1ParPushHeapRSClosure(G1CollectedHeap* g1,
                         G1ParScanThreadState* par_scan_state):
    G1ParClosureSuper(g1, par_scan_state) { }

  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p)          { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p)    { do_oop_nv(p); }
};

class G1ParScanClosure : public G1ParClosureSuper {
public:
  // Installs rp as the (inherited) reference processor; it must not have
  // been set before.
  G1ParScanClosure(G1CollectedHeap* g1, ReferenceProcessor* rp) :
    G1ParClosureSuper(g1) {
    assert(_ref_processor == NULL, "sanity");
    _ref_processor = rp;
  }

  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p)          { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p)    { do_oop_nv(p); }
};

// Add back base class for metadata
class G1ParCopyHelper : public G1ParClosureSuper {
protected:
  Klass* _scanned_klass;
  ConcurrentMark* _cm;

  // Mark the object if it's not already marked. This is used to mark
  // objects pointed to by roots that are guaranteed not to move
  // during the GC (i.e., non-CSet objects). It is MT-safe.
  void mark_object(oop obj);

  // Mark the object if it's not already marked. This is used to mark
  // objects pointed to by roots that have been forwarded during a
  // GC. It is MT-safe.
  void mark_forwarded_object(oop from_obj, oop to_obj);
public:
  G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state);

  void set_scanned_klass(Klass* k) { _scanned_klass = k; }
  template <class T> void do_klass_barrier(T* p, oop new_obj);
};

template <G1Barrier barrier, bool do_mark_object>
class G1ParCopyClosure : public G1ParCopyHelper {
private:
  template <class T> void do_oop_work(T* p);

public:
  // NOTE(review): rp is only asserted against here and never stored;
  // _ref_processor stays NULL — confirm this is intentional.
  G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state,
                   ReferenceProcessor* rp) :
      G1ParCopyHelper(g1, par_scan_state) {
    assert(_ref_processor == NULL, "sanity");
  }

  template <class T> void do_oop_nv(T* p) { do_oop_work(p); }
  virtual void do_oop(oop* p)       { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};

typedef G1ParCopyClosure<G1BarrierNone,  false> G1ParScanExtRootClosure;
typedef G1ParCopyClosure<G1BarrierKlass, false> G1ParScanMetadataClosure;


typedef G1ParCopyClosure<G1BarrierNone,  true> G1ParScanAndMarkExtRootClosure;
typedef G1ParCopyClosure<G1BarrierKlass, true> G1ParScanAndMarkMetadataClosure;

// We use a separate closure to handle references during evacuation
// failure processing.
135 136 typedef G1ParCopyClosure<G1BarrierEvac, false> G1ParScanHeapEvacFailureClosure; 137 138 class FilterIntoCSClosure: public ExtendedOopClosure { 139 G1CollectedHeap* _g1; 140 OopClosure* _oc; 141 DirtyCardToOopClosure* _dcto_cl; 142 public: 143 FilterIntoCSClosure( DirtyCardToOopClosure* dcto_cl, 144 G1CollectedHeap* g1, 145 OopClosure* oc) : 146 _dcto_cl(dcto_cl), _g1(g1), _oc(oc) { } 147 148 template <class T> void do_oop_nv(T* p); 149 virtual void do_oop(oop* p) { do_oop_nv(p); } 150 virtual void do_oop(narrowOop* p) { do_oop_nv(p); } 151 bool apply_to_weak_ref_discovered_field() { return true; } 152 }; 153 154 class FilterOutOfRegionClosure: public ExtendedOopClosure { 155 HeapWord* _r_bottom; 156 HeapWord* _r_end; 157 OopClosure* _oc; 158 public: 159 FilterOutOfRegionClosure(HeapRegion* r, OopClosure* oc); 160 template <class T> void do_oop_nv(T* p); 161 virtual void do_oop(oop* p) { do_oop_nv(p); } 162 virtual void do_oop(narrowOop* p) { do_oop_nv(p); } 163 bool apply_to_weak_ref_discovered_field() { return true; } 164 }; 165 166 // Closure for iterating over object fields during concurrent marking 167 class G1CMOopClosure : public ExtendedOopClosure { 168 private: 169 G1CollectedHeap* _g1h; 170 ConcurrentMark* _cm; 171 CMTask* _task; 172 public: 173 G1CMOopClosure(G1CollectedHeap* g1h, ConcurrentMark* cm, CMTask* task); 174 template <class T> void do_oop_nv(T* p); 175 virtual void do_oop( oop* p) { do_oop_nv(p); } 176 virtual void do_oop(narrowOop* p) { do_oop_nv(p); } 177 }; 178 179 // Closure to scan the root regions during concurrent marking 180 class G1RootRegionScanClosure : public ExtendedOopClosure { 181 private: 182 G1CollectedHeap* _g1h; 183 ConcurrentMark* _cm; 184 uint _worker_id; 185 public: 186 G1RootRegionScanClosure(G1CollectedHeap* g1h, ConcurrentMark* cm, 187 uint worker_id) : 188 _g1h(g1h), _cm(cm), _worker_id(worker_id) { } 189 template <class T> void do_oop_nv(T* p); 190 virtual void do_oop( oop* p) { do_oop_nv(p); } 191 virtual 
void do_oop(narrowOop* p) { do_oop_nv(p); } 192 }; 193 194 // Closure that applies the given two closures in sequence. 195 // Used by the RSet refinement code (when updating RSets 196 // during an evacuation pause) to record cards containing 197 // pointers into the collection set. 198 199 class G1Mux2Closure : public ExtendedOopClosure { 200 OopClosure* _c1; 201 OopClosure* _c2; 202 public: 203 G1Mux2Closure(OopClosure *c1, OopClosure *c2); 204 template <class T> void do_oop_nv(T* p); 205 virtual void do_oop(oop* p) { do_oop_nv(p); } 206 virtual void do_oop(narrowOop* p) { do_oop_nv(p); } 207 }; 208 209 // A closure that returns true if it is actually applied 210 // to a reference 211 212 class G1TriggerClosure : public ExtendedOopClosure { 213 bool _triggered; 214 public: 215 G1TriggerClosure(); 216 bool triggered() const { return _triggered; } 217 template <class T> void do_oop_nv(T* p); 218 virtual void do_oop(oop* p) { do_oop_nv(p); } 219 virtual void do_oop(narrowOop* p) { do_oop_nv(p); } 220 }; 221 222 // A closure which uses a triggering closure to determine 223 // whether to apply an oop closure. 

class G1InvokeIfNotTriggeredClosure: public ExtendedOopClosure {
  G1TriggerClosure* _trigger_cl;
  OopClosure* _oop_cl;
public:
  G1InvokeIfNotTriggeredClosure(G1TriggerClosure* t, OopClosure* oc);
  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(oop* p)        { do_oop_nv(p); }
  virtual void do_oop(narrowOop* p)  { do_oop_nv(p); }
};

class G1UpdateRSOrPushRefOopClosure: public ExtendedOopClosure {
  G1CollectedHeap* _g1;
  G1RemSet* _g1_rem_set;
  // Region containing the references this closure is applied to;
  // must be set via set_from() before use.
  HeapRegion* _from;
  OopsInHeapRegionClosure* _push_ref_cl;
  bool _record_refs_into_cset;
  uint _worker_i;

public:
  G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
                                G1RemSet* rs,
                                OopsInHeapRegionClosure* push_ref_cl,
                                bool record_refs_into_cset,
                                uint worker_i = 0);

  void set_from(HeapRegion* from) {
    assert(from != NULL, "from region must be non-NULL");
    _from = from;
  }

  // Returns true iff obj is forwarded and its forwardee is obj itself
  // ("self-forwarded" — the convention used during evacuation failure).
  bool self_forwarded(oop obj) {
    bool result = (obj->is_forwarded() && (obj->forwardee() == obj));
    return result;
  }

  bool apply_to_weak_ref_discovered_field() { return true; }

  template <class T> void do_oop_nv(T* p);
  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
  virtual void do_oop(oop* p)       { do_oop_nv(p); }
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP