/*
 * Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
25 #ifndef SHARE_VM_GC_CMS_CMSOOPCLOSURES_HPP
26 #define SHARE_VM_GC_CMS_CMSOOPCLOSURES_HPP
27
28 #include "gc/shared/genOopClosures.hpp"
29 #include "gc/shared/taskqueue.hpp"
30 #include "memory/iterator.hpp"
31
32 /////////////////////////////////////////////////////////////////
33 // Closures used by ConcurrentMarkSweepGeneration's collector
34 /////////////////////////////////////////////////////////////////
35 class ConcurrentMarkSweepGeneration;
36 class CMSBitMap;
37 class CMSMarkStack;
38 class CMSCollector;
39 class MarkFromRootsClosure;
40 class ParMarkFromRootsClosure;
41
42 // Decode the oop and call do_oop on it.
43 #define DO_OOP_WORK_DEFN \
44 void do_oop(oop obj); \
45 template <class T> inline void do_oop_work(T* p) { \
46 T heap_oop = oopDesc::load_heap_oop(p); \
47 if (!oopDesc::is_null(heap_oop)) { \
48 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); \
49 do_oop(obj); \
50 } \
51 }
52
53 // TODO: This duplication of the MetadataAwareOopClosure class is only needed
54 // because some CMS OopClosures derive from OopsInGenClosure. It would be
55 // good to get rid of them completely.
56 class MetadataAwareOopsInGenClosure: public OopsInGenClosure {
57 KlassToOopClosure _klass_closure;
58 public:
59 MetadataAwareOopsInGenClosure() {
60 _klass_closure.initialize(this);
61 }
62
63 virtual bool do_metadata() { return do_metadata_nv(); }
64 inline bool do_metadata_nv() { return true; }
65
66 virtual void do_klass(Klass* k);
67 void do_klass_nv(Klass* k);
68
69 virtual void do_cld(ClassLoaderData* cld) { do_cld_nv(cld); }
70 void do_cld_nv(ClassLoaderData* cld);
71 };
114 class PushAndMarkClosure: public MetadataAwareOopClosure {
115 private:
116 CMSCollector* _collector;
117 MemRegion _span;
118 CMSBitMap* _bit_map;
119 CMSBitMap* _mod_union_table;
120 CMSMarkStack* _mark_stack;
121 bool _concurrent_precleaning;
122 protected:
123 DO_OOP_WORK_DEFN
124 public:
125 PushAndMarkClosure(CMSCollector* collector,
126 MemRegion span,
127 ReferenceProcessor* rp,
128 CMSBitMap* bit_map,
129 CMSBitMap* mod_union_table,
130 CMSMarkStack* mark_stack,
131 bool concurrent_precleaning);
132 virtual void do_oop(oop* p);
133 virtual void do_oop(narrowOop* p);
134 inline void do_oop_nv(oop* p) { PushAndMarkClosure::do_oop_work(p); }
135 inline void do_oop_nv(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
136 };
137
138 // In the parallel case, the bit map and the
139 // reference processor are currently all shared. Access to
140 // these shared mutable structures must use appropriate
141 // synchronization (for instance, via CAS). The marking stack
142 // used in the non-parallel case above is here replaced with
143 // an OopTaskQueue structure to allow efficient work stealing.
144 class ParPushAndMarkClosure: public MetadataAwareOopClosure {
145 private:
146 CMSCollector* _collector;
147 MemRegion _span;
148 CMSBitMap* _bit_map;
149 OopTaskQueue* _work_queue;
150 protected:
151 DO_OOP_WORK_DEFN
152 public:
153 ParPushAndMarkClosure(CMSCollector* collector,
154 MemRegion span,
155 ReferenceProcessor* rp,
156 CMSBitMap* bit_map,
157 OopTaskQueue* work_queue);
158 virtual void do_oop(oop* p);
159 virtual void do_oop(narrowOop* p);
160 inline void do_oop_nv(oop* p) { ParPushAndMarkClosure::do_oop_work(p); }
161 inline void do_oop_nv(narrowOop* p) { ParPushAndMarkClosure::do_oop_work(p); }
162 };
163
164 // The non-parallel version (the parallel version appears further below).
165 class MarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
166 private:
167 MemRegion _span;
168 CMSBitMap* _bit_map;
169 CMSMarkStack* _mark_stack;
170 PushAndMarkClosure _pushAndMarkClosure;
171 CMSCollector* _collector;
172 Mutex* _freelistLock;
173 bool _yield;
174 // Whether closure is being used for concurrent precleaning
175 bool _concurrent_precleaning;
176 protected:
177 DO_OOP_WORK_DEFN
178 public:
179 MarkRefsIntoAndScanClosure(MemRegion span,
180 ReferenceProcessor* rp,
181 CMSBitMap* bit_map,
182 CMSBitMap* mod_union_table,
183 CMSMarkStack* mark_stack,
184 CMSCollector* collector,
185 bool should_yield,
186 bool concurrent_precleaning);
187 virtual void do_oop(oop* p);
188 virtual void do_oop(narrowOop* p);
189 inline void do_oop_nv(oop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
190 inline void do_oop_nv(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
191
192 void set_freelistLock(Mutex* m) {
193 _freelistLock = m;
194 }
195
196 private:
197 inline void do_yield_check();
198 void do_yield_work();
199 bool take_from_overflow_list();
200 };
201
202 // In this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit
203 // stack and the bitMap are shared, so access needs to be suitably
204 // synchronized. An OopTaskQueue structure, supporting efficient
205 // work stealing, replaces a CMSMarkStack for storing grey objects.
206 class ParMarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
207 private:
208 MemRegion _span;
209 CMSBitMap* _bit_map;
210 OopTaskQueue* _work_queue;
211 const uint _low_water_mark;
212 ParPushAndMarkClosure _parPushAndMarkClosure;
213 protected:
214 DO_OOP_WORK_DEFN
215 public:
216 ParMarkRefsIntoAndScanClosure(CMSCollector* collector,
217 MemRegion span,
218 ReferenceProcessor* rp,
219 CMSBitMap* bit_map,
220 OopTaskQueue* work_queue);
221 virtual void do_oop(oop* p);
222 virtual void do_oop(narrowOop* p);
223 inline void do_oop_nv(oop* p) { ParMarkRefsIntoAndScanClosure::do_oop_work(p); }
224 inline void do_oop_nv(narrowOop* p) { ParMarkRefsIntoAndScanClosure::do_oop_work(p); }
225
226 void trim_queue(uint size);
227 };
228
229 // This closure is used during the concurrent marking phase
230 // following the first checkpoint. Its use is buried in
231 // the closure MarkFromRootsClosure.
232 class PushOrMarkClosure: public MetadataAwareOopClosure {
233 private:
234 CMSCollector* _collector;
235 MemRegion _span;
236 CMSBitMap* _bitMap;
237 CMSMarkStack* _markStack;
238 HeapWord* const _finger;
239 MarkFromRootsClosure* const
240 _parent;
241 protected:
242 DO_OOP_WORK_DEFN
243 public:
244 PushOrMarkClosure(CMSCollector* cms_collector,
245 MemRegion span,
246 CMSBitMap* bitMap,
247 CMSMarkStack* markStack,
248 HeapWord* finger,
249 MarkFromRootsClosure* parent);
250 virtual void do_oop(oop* p);
251 virtual void do_oop(narrowOop* p);
252 inline void do_oop_nv(oop* p) { PushOrMarkClosure::do_oop_work(p); }
253 inline void do_oop_nv(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
254
255 // Deal with a stack overflow condition
256 void handle_stack_overflow(HeapWord* lost);
257 private:
258 inline void do_yield_check();
259 };
260
261 // A parallel (MT) version of the above.
262 // This closure is used during the concurrent marking phase
263 // following the first checkpoint. Its use is buried in
264 // the closure ParMarkFromRootsClosure.
265 class ParPushOrMarkClosure: public MetadataAwareOopClosure {
266 private:
267 CMSCollector* _collector;
268 MemRegion _whole_span;
269 MemRegion _span; // local chunk
270 CMSBitMap* _bit_map;
271 OopTaskQueue* _work_queue;
272 CMSMarkStack* _overflow_stack;
273 HeapWord* const _finger;
274 HeapWord** const _global_finger_addr;
275 ParMarkFromRootsClosure* const
276 _parent;
277 protected:
278 DO_OOP_WORK_DEFN
279 public:
280 ParPushOrMarkClosure(CMSCollector* cms_collector,
281 MemRegion span,
282 CMSBitMap* bit_map,
283 OopTaskQueue* work_queue,
284 CMSMarkStack* mark_stack,
285 HeapWord* finger,
286 HeapWord** global_finger_addr,
287 ParMarkFromRootsClosure* parent);
288 virtual void do_oop(oop* p);
289 virtual void do_oop(narrowOop* p);
290 inline void do_oop_nv(oop* p) { ParPushOrMarkClosure::do_oop_work(p); }
291 inline void do_oop_nv(narrowOop* p) { ParPushOrMarkClosure::do_oop_work(p); }
292
293 // Deal with a stack overflow condition
294 void handle_stack_overflow(HeapWord* lost);
295 private:
296 inline void do_yield_check();
297 };
298
299 // For objects in CMS generation, this closure marks
300 // given objects (transitively) as being reachable/live.
301 // This is currently used during the (weak) reference object
302 // processing phase of the CMS final checkpoint step, as
303 // well as during the concurrent precleaning of the discovered
304 // reference lists.
305 class CMSKeepAliveClosure: public MetadataAwareOopClosure {
306 private:
307 CMSCollector* _collector;
308 const MemRegion _span;
309 CMSMarkStack* _mark_stack;
310 CMSBitMap* _bit_map;
311 bool _concurrent_precleaning;
312 protected:
313 DO_OOP_WORK_DEFN
314 public:
315 CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
316 CMSBitMap* bit_map, CMSMarkStack* mark_stack,
317 bool cpc);
318 bool concurrent_precleaning() const { return _concurrent_precleaning; }
319 virtual void do_oop(oop* p);
320 virtual void do_oop(narrowOop* p);
321 inline void do_oop_nv(oop* p) { CMSKeepAliveClosure::do_oop_work(p); }
322 inline void do_oop_nv(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
323 };
324
325 class CMSInnerParMarkAndPushClosure: public MetadataAwareOopClosure {
326 private:
327 CMSCollector* _collector;
328 MemRegion _span;
329 OopTaskQueue* _work_queue;
330 CMSBitMap* _bit_map;
331 protected:
332 DO_OOP_WORK_DEFN
333 public:
334 CMSInnerParMarkAndPushClosure(CMSCollector* collector,
335 MemRegion span, CMSBitMap* bit_map,
336 OopTaskQueue* work_queue);
337 virtual void do_oop(oop* p);
338 virtual void do_oop(narrowOop* p);
339 inline void do_oop_nv(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
340 inline void do_oop_nv(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
341 };
342
343 // A parallel (MT) version of the above, used when
344 // reference processing is parallel; the only difference
345 // is in the do_oop method.
346 class CMSParKeepAliveClosure: public MetadataAwareOopClosure {
347 private:
348 MemRegion _span;
349 OopTaskQueue* _work_queue;
350 CMSBitMap* _bit_map;
351 CMSInnerParMarkAndPushClosure
352 _mark_and_push;
353 const uint _low_water_mark;
354 void trim_queue(uint max);
355 protected:
356 DO_OOP_WORK_DEFN
357 public:
358 CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span,
359 CMSBitMap* bit_map, OopTaskQueue* work_queue);
360 virtual void do_oop(oop* p);
|
/*
 * Copyright (c) 2007, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
25 #ifndef SHARE_VM_GC_CMS_CMSOOPCLOSURES_HPP
26 #define SHARE_VM_GC_CMS_CMSOOPCLOSURES_HPP
27
28 #include "gc/shared/genOopClosures.hpp"
29 #include "gc/shared/taskqueue.hpp"
30 #include "memory/iterator.hpp"
31
32 /////////////////////////////////////////////////////////////////
33 // Closures used by ConcurrentMarkSweepGeneration's collector
34 /////////////////////////////////////////////////////////////////
35 class ConcurrentMarkSweepGeneration;
36 class CMSBitMap;
37 class CMSMarkStack;
38 class CMSCollector;
39 class MarkFromRootsClosure;
40 class ParMarkFromRootsClosure;
41
42 // Decode the oop and call do_oop on it.
43 #define DO_OOP_WORK_DEFN \
44 void do_oop(oop obj); \
45 template <class T> inline void do_oop_work(T* p);
46
47 // TODO: This duplication of the MetadataAwareOopClosure class is only needed
48 // because some CMS OopClosures derive from OopsInGenClosure. It would be
49 // good to get rid of them completely.
50 class MetadataAwareOopsInGenClosure: public OopsInGenClosure {
51 KlassToOopClosure _klass_closure;
52 public:
53 MetadataAwareOopsInGenClosure() {
54 _klass_closure.initialize(this);
55 }
56
57 virtual bool do_metadata() { return do_metadata_nv(); }
58 inline bool do_metadata_nv() { return true; }
59
60 virtual void do_klass(Klass* k);
61 void do_klass_nv(Klass* k);
62
63 virtual void do_cld(ClassLoaderData* cld) { do_cld_nv(cld); }
64 void do_cld_nv(ClassLoaderData* cld);
65 };
108 class PushAndMarkClosure: public MetadataAwareOopClosure {
109 private:
110 CMSCollector* _collector;
111 MemRegion _span;
112 CMSBitMap* _bit_map;
113 CMSBitMap* _mod_union_table;
114 CMSMarkStack* _mark_stack;
115 bool _concurrent_precleaning;
116 protected:
117 DO_OOP_WORK_DEFN
118 public:
119 PushAndMarkClosure(CMSCollector* collector,
120 MemRegion span,
121 ReferenceProcessor* rp,
122 CMSBitMap* bit_map,
123 CMSBitMap* mod_union_table,
124 CMSMarkStack* mark_stack,
125 bool concurrent_precleaning);
126 virtual void do_oop(oop* p);
127 virtual void do_oop(narrowOop* p);
128 inline void do_oop_nv(oop* p);
129 inline void do_oop_nv(narrowOop* p);
130 };
131
132 // In the parallel case, the bit map and the
133 // reference processor are currently all shared. Access to
134 // these shared mutable structures must use appropriate
135 // synchronization (for instance, via CAS). The marking stack
136 // used in the non-parallel case above is here replaced with
137 // an OopTaskQueue structure to allow efficient work stealing.
138 class ParPushAndMarkClosure: public MetadataAwareOopClosure {
139 private:
140 CMSCollector* _collector;
141 MemRegion _span;
142 CMSBitMap* _bit_map;
143 OopTaskQueue* _work_queue;
144 protected:
145 DO_OOP_WORK_DEFN
146 public:
147 ParPushAndMarkClosure(CMSCollector* collector,
148 MemRegion span,
149 ReferenceProcessor* rp,
150 CMSBitMap* bit_map,
151 OopTaskQueue* work_queue);
152 virtual void do_oop(oop* p);
153 virtual void do_oop(narrowOop* p);
154 inline void do_oop_nv(oop* p);
155 inline void do_oop_nv(narrowOop* p);
156 };
157
158 // The non-parallel version (the parallel version appears further below).
159 class MarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
160 private:
161 MemRegion _span;
162 CMSBitMap* _bit_map;
163 CMSMarkStack* _mark_stack;
164 PushAndMarkClosure _pushAndMarkClosure;
165 CMSCollector* _collector;
166 Mutex* _freelistLock;
167 bool _yield;
168 // Whether closure is being used for concurrent precleaning
169 bool _concurrent_precleaning;
170 protected:
171 DO_OOP_WORK_DEFN
172 public:
173 MarkRefsIntoAndScanClosure(MemRegion span,
174 ReferenceProcessor* rp,
175 CMSBitMap* bit_map,
176 CMSBitMap* mod_union_table,
177 CMSMarkStack* mark_stack,
178 CMSCollector* collector,
179 bool should_yield,
180 bool concurrent_precleaning);
181 virtual void do_oop(oop* p);
182 virtual void do_oop(narrowOop* p);
183 inline void do_oop_nv(oop* p);
184 inline void do_oop_nv(narrowOop* p);
185
186 void set_freelistLock(Mutex* m) {
187 _freelistLock = m;
188 }
189
190 private:
191 inline void do_yield_check();
192 void do_yield_work();
193 bool take_from_overflow_list();
194 };
195
196 // In this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit
197 // stack and the bitMap are shared, so access needs to be suitably
198 // synchronized. An OopTaskQueue structure, supporting efficient
199 // work stealing, replaces a CMSMarkStack for storing grey objects.
200 class ParMarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
201 private:
202 MemRegion _span;
203 CMSBitMap* _bit_map;
204 OopTaskQueue* _work_queue;
205 const uint _low_water_mark;
206 ParPushAndMarkClosure _parPushAndMarkClosure;
207 protected:
208 DO_OOP_WORK_DEFN
209 public:
210 ParMarkRefsIntoAndScanClosure(CMSCollector* collector,
211 MemRegion span,
212 ReferenceProcessor* rp,
213 CMSBitMap* bit_map,
214 OopTaskQueue* work_queue);
215 virtual void do_oop(oop* p);
216 virtual void do_oop(narrowOop* p);
217 inline void do_oop_nv(oop* p);
218 inline void do_oop_nv(narrowOop* p);
219
220 void trim_queue(uint size);
221 };
222
223 // This closure is used during the concurrent marking phase
224 // following the first checkpoint. Its use is buried in
225 // the closure MarkFromRootsClosure.
226 class PushOrMarkClosure: public MetadataAwareOopClosure {
227 private:
228 CMSCollector* _collector;
229 MemRegion _span;
230 CMSBitMap* _bitMap;
231 CMSMarkStack* _markStack;
232 HeapWord* const _finger;
233 MarkFromRootsClosure* const
234 _parent;
235 protected:
236 DO_OOP_WORK_DEFN
237 public:
238 PushOrMarkClosure(CMSCollector* cms_collector,
239 MemRegion span,
240 CMSBitMap* bitMap,
241 CMSMarkStack* markStack,
242 HeapWord* finger,
243 MarkFromRootsClosure* parent);
244 virtual void do_oop(oop* p);
245 virtual void do_oop(narrowOop* p);
246 inline void do_oop_nv(oop* p);
247 inline void do_oop_nv(narrowOop* p);
248
249 // Deal with a stack overflow condition
250 void handle_stack_overflow(HeapWord* lost);
251 private:
252 inline void do_yield_check();
253 };
254
255 // A parallel (MT) version of the above.
256 // This closure is used during the concurrent marking phase
257 // following the first checkpoint. Its use is buried in
258 // the closure ParMarkFromRootsClosure.
259 class ParPushOrMarkClosure: public MetadataAwareOopClosure {
260 private:
261 CMSCollector* _collector;
262 MemRegion _whole_span;
263 MemRegion _span; // local chunk
264 CMSBitMap* _bit_map;
265 OopTaskQueue* _work_queue;
266 CMSMarkStack* _overflow_stack;
267 HeapWord* const _finger;
268 HeapWord** const _global_finger_addr;
269 ParMarkFromRootsClosure* const
270 _parent;
271 protected:
272 DO_OOP_WORK_DEFN
273 public:
274 ParPushOrMarkClosure(CMSCollector* cms_collector,
275 MemRegion span,
276 CMSBitMap* bit_map,
277 OopTaskQueue* work_queue,
278 CMSMarkStack* mark_stack,
279 HeapWord* finger,
280 HeapWord** global_finger_addr,
281 ParMarkFromRootsClosure* parent);
282 virtual void do_oop(oop* p);
283 virtual void do_oop(narrowOop* p);
284 inline void do_oop_nv(oop* p);
285 inline void do_oop_nv(narrowOop* p);
286
287 // Deal with a stack overflow condition
288 void handle_stack_overflow(HeapWord* lost);
289 private:
290 inline void do_yield_check();
291 };
292
293 // For objects in CMS generation, this closure marks
294 // given objects (transitively) as being reachable/live.
295 // This is currently used during the (weak) reference object
296 // processing phase of the CMS final checkpoint step, as
297 // well as during the concurrent precleaning of the discovered
298 // reference lists.
299 class CMSKeepAliveClosure: public MetadataAwareOopClosure {
300 private:
301 CMSCollector* _collector;
302 const MemRegion _span;
303 CMSMarkStack* _mark_stack;
304 CMSBitMap* _bit_map;
305 bool _concurrent_precleaning;
306 protected:
307 DO_OOP_WORK_DEFN
308 public:
309 CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
310 CMSBitMap* bit_map, CMSMarkStack* mark_stack,
311 bool cpc);
312 bool concurrent_precleaning() const { return _concurrent_precleaning; }
313 virtual void do_oop(oop* p);
314 virtual void do_oop(narrowOop* p);
315 inline void do_oop_nv(oop* p);
316 inline void do_oop_nv(narrowOop* p);
317 };
318
319 class CMSInnerParMarkAndPushClosure: public MetadataAwareOopClosure {
320 private:
321 CMSCollector* _collector;
322 MemRegion _span;
323 OopTaskQueue* _work_queue;
324 CMSBitMap* _bit_map;
325 protected:
326 DO_OOP_WORK_DEFN
327 public:
328 CMSInnerParMarkAndPushClosure(CMSCollector* collector,
329 MemRegion span, CMSBitMap* bit_map,
330 OopTaskQueue* work_queue);
331 virtual void do_oop(oop* p);
332 virtual void do_oop(narrowOop* p);
333 inline void do_oop_nv(oop* p);
334 inline void do_oop_nv(narrowOop* p);
335 };
336
337 // A parallel (MT) version of the above, used when
338 // reference processing is parallel; the only difference
339 // is in the do_oop method.
340 class CMSParKeepAliveClosure: public MetadataAwareOopClosure {
341 private:
342 MemRegion _span;
343 OopTaskQueue* _work_queue;
344 CMSBitMap* _bit_map;
345 CMSInnerParMarkAndPushClosure
346 _mark_and_push;
347 const uint _low_water_mark;
348 void trim_queue(uint max);
349 protected:
350 DO_OOP_WORK_DEFN
351 public:
352 CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span,
353 CMSBitMap* bit_map, OopTaskQueue* work_queue);
354 virtual void do_oop(oop* p);
|