 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_CMS_CMSOOPCLOSURES_HPP
#define SHARE_VM_GC_CMS_CMSOOPCLOSURES_HPP

#include "gc/shared/genOopClosures.hpp"
#include "gc/shared/taskqueue.hpp"
#include "memory/iterator.hpp"

/////////////////////////////////////////////////////////////////
// Closures used by ConcurrentMarkSweepGeneration's collector
/////////////////////////////////////////////////////////////////
class ConcurrentMarkSweepGeneration;
class CMSBitMap;
class CMSMarkStack;
class CMSCollector;
class MarkFromRootsClosure;
class Par_MarkFromRootsClosure;

// Decode the oop and call do_oop on it.
#define DO_OOP_WORK_DEFN                                      \
  void do_oop(oop obj);                                       \
  template <class T> inline void do_oop_work(T* p) {          \
    T heap_oop = oopDesc::load_heap_oop(p);                   \
    if (!oopDesc::is_null(heap_oop)) {                        \
      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);  \
      do_oop(obj);                                            \
    }                                                         \
  }
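
// Illustration (added commentary, not from the original file): a closure
// that places DO_OOP_WORK_DEFN in its protected section typically forwards
// its virtual entry points to the inlined helper, roughly:
//
//   void SomeClosure::do_oop(oop* p)       { SomeClosure::do_oop_work(p); }
//   void SomeClosure::do_oop(narrowOop* p) { SomeClosure::do_oop_work(p); }
//
// so the compressed (narrowOop) and uncompressed (oop) cases share one
// decode-then-dispatch path; only the per-closure do_oop(oop obj) differs.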

// TODO: This duplication of the MetadataAwareOopClosure class is only needed
// because some CMS OopClosures derive from OopsInGenClosure. It would be
// good to get rid of them completely.
class MetadataAwareOopsInGenClosure: public OopsInGenClosure {
  KlassToOopClosure _klass_closure;
 public:
  MetadataAwareOopsInGenClosure() {
    _klass_closure.initialize(this);
  }

  virtual bool do_metadata()    { return do_metadata_nv(); }
  inline  bool do_metadata_nv() { return true; }

  virtual void do_klass(Klass* k);
  void do_klass_nv(Klass* k);

  virtual void do_cld(ClassLoaderData* cld) { do_cld_nv(cld); }
  void do_cld_nv(ClassLoaderData* cld);
};
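
// Added note: the *_nv suffix is HotSpot's convention for the "non-virtual"
// variant of a closure method; the specialized oop-iteration machinery can
// bind these statically to avoid virtual dispatch on hot paths, while the
// virtual do_klass/do_cld entry points above simply forward to them.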

class MarkRefsIntoClosure: public MetadataAwareOopsInGenClosure {
 private:
  const MemRegion _span;
  CMSBitMap*      _bitMap;
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};

class Par_MarkRefsIntoClosure: public MetadataAwareOopsInGenClosure {
 private:
  const MemRegion _span;
  CMSBitMap*      _bitMap;
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};

// A variant of the above used in certain kinds of CMS
// marking verification.
class MarkRefsIntoVerifyClosure: public MetadataAwareOopsInGenClosure {
 private:
  const MemRegion _span;
  CMSBitMap*      _verification_bm;
  CMSBitMap*      _cms_bm;
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoVerifyClosure(MemRegion span, CMSBitMap* verification_bm,
                            CMSBitMap* cms_bm);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};

// The non-parallel version (the parallel version appears further below).
class PushAndMarkClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _span;
  CMSBitMap*    _bit_map;
  CMSBitMap*    _mod_union_table;
  CMSMarkStack* _mark_stack;
  bool          _concurrent_precleaning;
 protected:
  DO_OOP_WORK_DEFN
 public:
  PushAndMarkClosure(CMSCollector* collector,
                     MemRegion span,
                     ReferenceProcessor* rp,
                     CMSBitMap* bit_map,
                     CMSBitMap* mod_union_table,
                     CMSMarkStack* mark_stack,
                     bool concurrent_precleaning);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
};

// In the parallel case, the bit map and the reference processor are both
// shared. Access to these shared, mutable structures must use appropriate
// synchronization (for instance, via CAS). The marking stack
// used in the non-parallel case above is replaced here by
// an OopTaskQueue structure to allow efficient work stealing.
class Par_PushAndMarkClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector* _collector;
  MemRegion     _span;
  CMSBitMap*    _bit_map;
  OopTaskQueue* _work_queue;
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_PushAndMarkClosure(CMSCollector* collector,
                         MemRegion span,
                         ReferenceProcessor* rp,
                         CMSBitMap* bit_map,
                         OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
};
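
// Rough sketch (assumed shape, not code from this file) of the draining
// and stealing loop these queues are meant to support:
//
//   oop obj;
//   while (work_queue->pop_local(obj) ||
//          task_queues->steal(worker_id, &seed, obj)) {
//     obj->oop_iterate(&cl);  // scan; newly greyed oops go back on a queue
//   }
//
// This is why the parallel closures carry an OopTaskQueue* where the
// serial PushAndMarkClosure carries a CMSMarkStack*.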

// The non-parallel version (the parallel version appears further below).
class MarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
 private:
  MemRegion          _span;
  CMSBitMap*         _bit_map;
  CMSMarkStack*      _mark_stack;
  PushAndMarkClosure _pushAndMarkClosure;
  CMSCollector*      _collector;
  Mutex*             _freelistLock;
  bool               _yield;
  // Whether closure is being used for concurrent precleaning
  bool               _concurrent_precleaning;
 protected:
  DO_OOP_WORK_DEFN
 public:
  MarkRefsIntoAndScanClosure(MemRegion span,
                             ReferenceProcessor* rp,
                             CMSBitMap* bit_map,
                             CMSBitMap* mod_union_table,
                             CMSMarkStack* mark_stack,
                             CMSCollector* collector,
                             bool should_yield,
                             bool concurrent_precleaning);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }

  void set_freelistLock(Mutex* m) {
    _freelistLock = m;
  }

 private:
  inline void do_yield_check();
  void do_yield_work();
  bool take_from_overflow_list();
};
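
// Added commentary (assumed behavior): do_yield_check() is the inlined
// fast-path test consulted while scanning; when the collector requests a
// yield, do_yield_work() is expected to release _freelistLock (and the
// bitmap lock) briefly so a competing foreground operation can proceed,
// then reacquire them, which is why set_freelistLock() must be called
// before using the closure anywhere yielding is possible.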

// In this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit
// stack and the bitMap are shared, so access needs to be suitably
// synchronized. An OopTaskQueue structure, supporting efficient
// work stealing, replaces a CMSMarkStack for storing grey objects.
class Par_MarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
 private:
  MemRegion              _span;
  CMSBitMap*             _bit_map;
  OopTaskQueue*          _work_queue;
  const uint             _low_water_mark;
  Par_PushAndMarkClosure _par_pushAndMarkClosure;
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_MarkRefsIntoAndScanClosure(CMSCollector* collector,
                                 MemRegion span,
                                 ReferenceProcessor* rp,
                                 CMSBitMap* bit_map,
                                 OopTaskQueue* work_queue);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }

  void trim_queue(uint size);
};
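
// Added commentary (assumed behavior): trim_queue(n) drains the local
// work queue down to about n entries, scanning each popped object;
// _low_water_mark bounds the draining so some tasks remain in the queue,
// available for other workers to steal.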

// This closure is used during the concurrent marking phase
// following the first checkpoint. Its use is buried in
// the closure MarkFromRootsClosure.
class PushOrMarkClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector*   _collector;
  MemRegion       _span;
  CMSBitMap*      _bitMap;
  CMSMarkStack*   _markStack;
  HeapWord* const _finger;
  MarkFromRootsClosure* const _parent;
 protected:
  DO_OOP_WORK_DEFN
 public:
  PushOrMarkClosure(CMSCollector* cms_collector,
                    MemRegion span,
                    CMSBitMap* bitMap,
                    CMSMarkStack* markStack,
                    HeapWord* finger,
                    MarkFromRootsClosure* parent);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }

  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
 private:
  inline void do_yield_check();
};
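
// Added commentary (assumed behavior): when the CMSMarkStack overflows,
// a grey object cannot be recorded, so handle_stack_overflow(lost)
// remembers the lowest such "lost" address and the collector later
// restarts marking from below it instead of dropping reachability.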

// A parallel (MT) version of the above.
// This closure is used during the concurrent marking phase
// following the first checkpoint. Its use is buried in
// the closure Par_MarkFromRootsClosure.
class Par_PushOrMarkClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector*    _collector;
  MemRegion        _whole_span;
  MemRegion        _span;        // local chunk
  CMSBitMap*       _bit_map;
  OopTaskQueue*    _work_queue;
  CMSMarkStack*    _overflow_stack;
  HeapWord*  const _finger;
  HeapWord** const _global_finger_addr;
  Par_MarkFromRootsClosure* const _parent;
 protected:
  DO_OOP_WORK_DEFN
 public:
  Par_PushOrMarkClosure(CMSCollector* cms_collector,
                        MemRegion span,
                        CMSBitMap* bit_map,
                        OopTaskQueue* work_queue,
                        CMSMarkStack* mark_stack,
                        HeapWord* finger,
                        HeapWord** global_finger_addr,
                        Par_MarkFromRootsClosure* parent);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
  inline void do_oop_nv(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
  inline void do_oop_nv(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }

  // Deal with a stack overflow condition
  void handle_stack_overflow(HeapWord* lost);
 private:
  inline void do_yield_check();
};
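
// Added commentary: _finger is this worker's local scan finger within its
// _span chunk, while *_global_finger_addr tracks the global scan front
// over _whole_span. The usual CMS invariant applies: an object discovered
// at or beyond the relevant finger only needs its mark bit set (the finger
// sweep will scan it later), while one below the finger must also be
// pushed on the queue or overflow stack so it is not missed.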

// For objects in CMS generation, this closure marks
// given objects (transitively) as being reachable/live.
// This is currently used during the (weak) reference object
// processing phase of the CMS final checkpoint step, as
// well as during the concurrent precleaning of the discovered
// reference lists.
class CMSKeepAliveClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector*   _collector;
  const MemRegion _span;
  CMSMarkStack*   _mark_stack;
  CMSBitMap*      _bit_map;
  bool            _concurrent_precleaning;