Print this page
Split |
Split |
Close |
Expand all |
Collapse all |
--- old/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp
+++ new/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp
1 1 /*
2 2 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSOOPCLOSURES_HPP
26 26 #define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSOOPCLOSURES_HPP
27 27
28 28 #include "memory/genOopClosures.hpp"
29 29
30 30 /////////////////////////////////////////////////////////////////
31 31 // Closures used by ConcurrentMarkSweepGeneration's collector
32 32 /////////////////////////////////////////////////////////////////
33 33 class ConcurrentMarkSweepGeneration;
34 34 class CMSBitMap;
35 35 class CMSMarkStack;
36 36 class CMSCollector;
37 37 class MarkFromRootsClosure;
38 38 class Par_MarkFromRootsClosure;
39 39
40 40 // Decode the oop and call do_oop on it.
41 41 #define DO_OOP_WORK_DEFN \
42 42 void do_oop(oop obj); \
43 43 template <class T> inline void do_oop_work(T* p) { \
44 44 T heap_oop = oopDesc::load_heap_oop(p); \
45 45 if (!oopDesc::is_null(heap_oop)) { \
46 46 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); \
47 47 do_oop(obj); \
48 48 } \
49 49 }
50 50
↓ open down ↓ |
50 lines elided |
↑ open up ↑ |
51 51 class MarkRefsIntoClosure: public OopsInGenClosure {
52 52 private:
53 53 const MemRegion _span;
54 54 CMSBitMap* _bitMap;
55 55 protected:
56 56 DO_OOP_WORK_DEFN
57 57 public:
58 58 MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
59 59 virtual void do_oop(oop* p);
60 60 virtual void do_oop(narrowOop* p);
61 - inline void do_oop_nv(oop* p) { MarkRefsIntoClosure::do_oop_work(p); }
62 - inline void do_oop_nv(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
61 +
62 + Prefetch::style prefetch_style() {
63 + return Prefetch::do_read;
64 + }
65 +};
66 +
67 +class Par_MarkRefsIntoClosure: public OopsInGenClosure {
68 + private:
69 + const MemRegion _span;
70 + CMSBitMap* _bitMap;
71 + protected:
72 + DO_OOP_WORK_DEFN
73 + public:
74 + Par_MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
75 + virtual void do_oop(oop* p);
76 + virtual void do_oop(narrowOop* p);
63 77 bool do_header() { return true; }
64 78 Prefetch::style prefetch_style() {
65 79 return Prefetch::do_read;
66 80 }
67 81 };
68 82
69 83 // A variant of the above used in certain kinds of CMS
70 84 // marking verification.
71 85 class MarkRefsIntoVerifyClosure: public OopsInGenClosure {
72 86 private:
73 87 const MemRegion _span;
74 88 CMSBitMap* _verification_bm;
75 89 CMSBitMap* _cms_bm;
76 90 protected:
77 91 DO_OOP_WORK_DEFN
78 92 public:
79 93 MarkRefsIntoVerifyClosure(MemRegion span, CMSBitMap* verification_bm,
80 94 CMSBitMap* cms_bm);
81 95 virtual void do_oop(oop* p);
82 96 virtual void do_oop(narrowOop* p);
83 97 inline void do_oop_nv(oop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
84 98 inline void do_oop_nv(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
85 99 bool do_header() { return true; }
86 100 Prefetch::style prefetch_style() {
87 101 return Prefetch::do_read;
88 102 }
89 103 };
90 104
91 105 // KlassRememberingOopClosure is used when marking of the permanent generation
92 106 // is being done. It adds fields to support revisiting of klasses
93 107 // for class unloading. _should_remember_klasses should be set to
94 108 // indicate if klasses should be remembered. Currently that is whenever
95 109 // CMS class unloading is turned on. The _revisit_stack is used
96 110 // to save the klasses for later processing.
97 111 class KlassRememberingOopClosure : public OopClosure {
98 112 protected:
99 113 CMSCollector* _collector;
100 114 CMSMarkStack* _revisit_stack;
101 115 bool const _should_remember_klasses;
102 116 public:
103 117 void check_remember_klasses() const PRODUCT_RETURN;
104 118 virtual const bool should_remember_klasses() const {
105 119 check_remember_klasses();
106 120 return _should_remember_klasses;
107 121 }
108 122 virtual void remember_klass(Klass* k);
109 123
110 124 KlassRememberingOopClosure(CMSCollector* collector,
111 125 ReferenceProcessor* rp,
112 126 CMSMarkStack* revisit_stack);
113 127 };
114 128
115 129 // Similar to KlassRememberingOopClosure for use when multiple
116 130 // GC threads will execute the closure.
117 131
118 132 class Par_KlassRememberingOopClosure : public KlassRememberingOopClosure {
119 133 public:
120 134 Par_KlassRememberingOopClosure(CMSCollector* collector,
121 135 ReferenceProcessor* rp,
122 136 CMSMarkStack* revisit_stack):
123 137 KlassRememberingOopClosure(collector, rp, revisit_stack) {}
124 138 virtual void remember_klass(Klass* k);
125 139 };
126 140
127 141 // The non-parallel version (the parallel version appears further below).
128 142 class PushAndMarkClosure: public KlassRememberingOopClosure {
129 143 private:
130 144 MemRegion _span;
131 145 CMSBitMap* _bit_map;
132 146 CMSBitMap* _mod_union_table;
133 147 CMSMarkStack* _mark_stack;
134 148 bool _concurrent_precleaning;
135 149 protected:
136 150 DO_OOP_WORK_DEFN
137 151 public:
138 152 PushAndMarkClosure(CMSCollector* collector,
139 153 MemRegion span,
140 154 ReferenceProcessor* rp,
141 155 CMSBitMap* bit_map,
142 156 CMSBitMap* mod_union_table,
143 157 CMSMarkStack* mark_stack,
144 158 CMSMarkStack* revisit_stack,
145 159 bool concurrent_precleaning);
146 160 virtual void do_oop(oop* p);
147 161 virtual void do_oop(narrowOop* p);
148 162 inline void do_oop_nv(oop* p) { PushAndMarkClosure::do_oop_work(p); }
149 163 inline void do_oop_nv(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
150 164 bool do_header() { return true; }
151 165 Prefetch::style prefetch_style() {
152 166 return Prefetch::do_read;
153 167 }
154 168 // In support of class unloading
155 169 virtual const bool should_remember_mdo() const {
156 170 return false;
157 171 // return _should_remember_klasses;
158 172 }
159 173 virtual void remember_mdo(DataLayout* v);
160 174 };
161 175
162 176 // In the parallel case, the revisit stack, the bit map and the
163 177 // reference processor are currently all shared. Access to
164 178 // these shared mutable structures must use appropriate
165 179 // synchronization (for instance, via CAS). The marking stack
166 180 // used in the non-parallel case above is here replaced with
167 181 // an OopTaskQueue structure to allow efficient work stealing.
168 182 class Par_PushAndMarkClosure: public Par_KlassRememberingOopClosure {
169 183 private:
170 184 MemRegion _span;
171 185 CMSBitMap* _bit_map;
172 186 OopTaskQueue* _work_queue;
173 187 protected:
174 188 DO_OOP_WORK_DEFN
175 189 public:
176 190 Par_PushAndMarkClosure(CMSCollector* collector,
177 191 MemRegion span,
178 192 ReferenceProcessor* rp,
179 193 CMSBitMap* bit_map,
180 194 OopTaskQueue* work_queue,
181 195 CMSMarkStack* revisit_stack);
182 196 virtual void do_oop(oop* p);
183 197 virtual void do_oop(narrowOop* p);
184 198 inline void do_oop_nv(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
185 199 inline void do_oop_nv(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
186 200 bool do_header() { return true; }
187 201 Prefetch::style prefetch_style() {
188 202 return Prefetch::do_read;
189 203 }
190 204 // In support of class unloading
191 205 virtual const bool should_remember_mdo() const {
192 206 return false;
193 207 // return _should_remember_klasses;
194 208 }
195 209 virtual void remember_mdo(DataLayout* v);
196 210 };
197 211
198 212 // The non-parallel version (the parallel version appears further below).
199 213 class MarkRefsIntoAndScanClosure: public OopsInGenClosure {
200 214 private:
201 215 MemRegion _span;
202 216 CMSBitMap* _bit_map;
203 217 CMSMarkStack* _mark_stack;
204 218 PushAndMarkClosure _pushAndMarkClosure;
205 219 CMSCollector* _collector;
206 220 Mutex* _freelistLock;
207 221 bool _yield;
208 222 // Whether closure is being used for concurrent precleaning
209 223 bool _concurrent_precleaning;
210 224 protected:
211 225 DO_OOP_WORK_DEFN
212 226 public:
213 227 MarkRefsIntoAndScanClosure(MemRegion span,
214 228 ReferenceProcessor* rp,
215 229 CMSBitMap* bit_map,
216 230 CMSBitMap* mod_union_table,
217 231 CMSMarkStack* mark_stack,
218 232 CMSMarkStack* revisit_stack,
219 233 CMSCollector* collector,
220 234 bool should_yield,
221 235 bool concurrent_precleaning);
222 236 virtual void do_oop(oop* p);
223 237 virtual void do_oop(narrowOop* p);
224 238 inline void do_oop_nv(oop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
225 239 inline void do_oop_nv(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
226 240 bool do_header() { return true; }
227 241 Prefetch::style prefetch_style() {
228 242 return Prefetch::do_read;
229 243 }
230 244 void set_freelistLock(Mutex* m) {
231 245 _freelistLock = m;
232 246 }
233 247 virtual const bool should_remember_klasses() const {
234 248 return _pushAndMarkClosure.should_remember_klasses();
235 249 }
236 250 virtual void remember_klass(Klass* k) {
237 251 _pushAndMarkClosure.remember_klass(k);
238 252 }
239 253
240 254 private:
241 255 inline void do_yield_check();
242 256 void do_yield_work();
243 257 bool take_from_overflow_list();
244 258 };
245 259
246 260 // In this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit
247 261 // stack and the bitMap are shared, so access needs to be suitably
248 262 // synchronized. An OopTaskQueue structure, supporting efficient
249 263 // workstealing, replaces a CMSMarkStack for storing grey objects.
250 264 class Par_MarkRefsIntoAndScanClosure: public OopsInGenClosure {
251 265 private:
252 266 MemRegion _span;
253 267 CMSBitMap* _bit_map;
254 268 OopTaskQueue* _work_queue;
255 269 const uint _low_water_mark;
256 270 Par_PushAndMarkClosure _par_pushAndMarkClosure;
257 271 protected:
258 272 DO_OOP_WORK_DEFN
259 273 public:
260 274 Par_MarkRefsIntoAndScanClosure(CMSCollector* collector,
261 275 MemRegion span,
262 276 ReferenceProcessor* rp,
263 277 CMSBitMap* bit_map,
264 278 OopTaskQueue* work_queue,
265 279 CMSMarkStack* revisit_stack);
266 280 virtual void do_oop(oop* p);
267 281 virtual void do_oop(narrowOop* p);
268 282 inline void do_oop_nv(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
269 283 inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
270 284 bool do_header() { return true; }
271 285 // When ScanMarkedObjectsAgainClosure is used,
272 286 // it passes [Par_]MarkRefsIntoAndScanClosure to oop_oop_iterate(),
273 287 // and this delegation is used.
274 288 virtual const bool should_remember_klasses() const {
275 289 return _par_pushAndMarkClosure.should_remember_klasses();
276 290 }
277 291 // See comment on should_remember_klasses() above.
278 292 virtual void remember_klass(Klass* k) {
279 293 _par_pushAndMarkClosure.remember_klass(k);
280 294 }
281 295 Prefetch::style prefetch_style() {
282 296 return Prefetch::do_read;
283 297 }
284 298 void trim_queue(uint size);
285 299 };
286 300
287 301 // This closure is used during the concurrent marking phase
288 302 // following the first checkpoint. Its use is buried in
289 303 // the closure MarkFromRootsClosure.
290 304 class PushOrMarkClosure: public KlassRememberingOopClosure {
291 305 private:
292 306 MemRegion _span;
293 307 CMSBitMap* _bitMap;
294 308 CMSMarkStack* _markStack;
295 309 HeapWord* const _finger;
296 310 MarkFromRootsClosure* const
297 311 _parent;
298 312 protected:
299 313 DO_OOP_WORK_DEFN
300 314 public:
301 315 PushOrMarkClosure(CMSCollector* cms_collector,
302 316 MemRegion span,
303 317 CMSBitMap* bitMap,
304 318 CMSMarkStack* markStack,
305 319 CMSMarkStack* revisitStack,
306 320 HeapWord* finger,
307 321 MarkFromRootsClosure* parent);
308 322 virtual void do_oop(oop* p);
309 323 virtual void do_oop(narrowOop* p);
310 324 inline void do_oop_nv(oop* p) { PushOrMarkClosure::do_oop_work(p); }
311 325 inline void do_oop_nv(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
312 326 // In support of class unloading
313 327 virtual const bool should_remember_mdo() const {
314 328 return false;
315 329 // return _should_remember_klasses;
316 330 }
317 331 virtual void remember_mdo(DataLayout* v);
318 332
319 333 // Deal with a stack overflow condition
320 334 void handle_stack_overflow(HeapWord* lost);
321 335 private:
322 336 inline void do_yield_check();
323 337 };
324 338
325 339 // A parallel (MT) version of the above.
326 340 // This closure is used during the concurrent marking phase
327 341 // following the first checkpoint. Its use is buried in
328 342 // the closure Par_MarkFromRootsClosure.
329 343 class Par_PushOrMarkClosure: public Par_KlassRememberingOopClosure {
330 344 private:
331 345 MemRegion _whole_span;
332 346 MemRegion _span; // local chunk
333 347 CMSBitMap* _bit_map;
334 348 OopTaskQueue* _work_queue;
335 349 CMSMarkStack* _overflow_stack;
336 350 HeapWord* const _finger;
337 351 HeapWord** const _global_finger_addr;
338 352 Par_MarkFromRootsClosure* const
339 353 _parent;
340 354 protected:
341 355 DO_OOP_WORK_DEFN
342 356 public:
343 357 Par_PushOrMarkClosure(CMSCollector* cms_collector,
344 358 MemRegion span,
345 359 CMSBitMap* bit_map,
346 360 OopTaskQueue* work_queue,
347 361 CMSMarkStack* mark_stack,
348 362 CMSMarkStack* revisit_stack,
349 363 HeapWord* finger,
350 364 HeapWord** global_finger_addr,
351 365 Par_MarkFromRootsClosure* parent);
352 366 virtual void do_oop(oop* p);
353 367 virtual void do_oop(narrowOop* p);
354 368 inline void do_oop_nv(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
355 369 inline void do_oop_nv(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
356 370 // In support of class unloading
357 371 virtual const bool should_remember_mdo() const {
358 372 return false;
359 373 // return _should_remember_klasses;
360 374 }
361 375 virtual void remember_mdo(DataLayout* v);
362 376
363 377 // Deal with a stack overflow condition
364 378 void handle_stack_overflow(HeapWord* lost);
365 379 private:
366 380 inline void do_yield_check();
367 381 };
368 382
369 383 // For objects in CMS generation, this closure marks
370 384 // given objects (transitively) as being reachable/live.
371 385 // This is currently used during the (weak) reference object
372 386 // processing phase of the CMS final checkpoint step, as
373 387 // well as during the concurrent precleaning of the discovered
374 388 // reference lists.
375 389 class CMSKeepAliveClosure: public KlassRememberingOopClosure {
376 390 private:
377 391 const MemRegion _span;
378 392 CMSMarkStack* _mark_stack;
379 393 CMSBitMap* _bit_map;
380 394 bool _concurrent_precleaning;
381 395 protected:
382 396 DO_OOP_WORK_DEFN
383 397 public:
384 398 CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
385 399 CMSBitMap* bit_map, CMSMarkStack* mark_stack,
386 400 CMSMarkStack* revisit_stack, bool cpc);
387 401 bool concurrent_precleaning() const { return _concurrent_precleaning; }
388 402 virtual void do_oop(oop* p);
389 403 virtual void do_oop(narrowOop* p);
390 404 inline void do_oop_nv(oop* p) { CMSKeepAliveClosure::do_oop_work(p); }
391 405 inline void do_oop_nv(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
392 406 };
393 407
394 408 class CMSInnerParMarkAndPushClosure: public Par_KlassRememberingOopClosure {
395 409 private:
396 410 MemRegion _span;
397 411 OopTaskQueue* _work_queue;
398 412 CMSBitMap* _bit_map;
399 413 protected:
400 414 DO_OOP_WORK_DEFN
401 415 public:
402 416 CMSInnerParMarkAndPushClosure(CMSCollector* collector,
403 417 MemRegion span, CMSBitMap* bit_map,
404 418 CMSMarkStack* revisit_stack,
405 419 OopTaskQueue* work_queue);
406 420 virtual void do_oop(oop* p);
407 421 virtual void do_oop(narrowOop* p);
408 422 inline void do_oop_nv(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
409 423 inline void do_oop_nv(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
410 424 };
411 425
412 426 // A parallel (MT) version of the above, used when
413 427 // reference processing is parallel; the only difference
414 428 // is in the do_oop method.
415 429 class CMSParKeepAliveClosure: public Par_KlassRememberingOopClosure {
416 430 private:
417 431 MemRegion _span;
418 432 OopTaskQueue* _work_queue;
419 433 CMSBitMap* _bit_map;
420 434 CMSInnerParMarkAndPushClosure
421 435 _mark_and_push;
422 436 const uint _low_water_mark;
423 437 void trim_queue(uint max);
424 438 protected:
425 439 DO_OOP_WORK_DEFN
426 440 public:
427 441 CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span,
428 442 CMSBitMap* bit_map, CMSMarkStack* revisit_stack,
429 443 OopTaskQueue* work_queue);
430 444 virtual void do_oop(oop* p);
431 445 virtual void do_oop(narrowOop* p);
432 446 inline void do_oop_nv(oop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
433 447 inline void do_oop_nv(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
434 448 };
435 449
436 450 #endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSOOPCLOSURES_HPP
↓ open down ↓ |
364 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX