 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_REFERENCEPROCESSOR_HPP
#define SHARE_VM_GC_SHARED_REFERENCEPROCESSOR_HPP

#include "gc/shared/referenceDiscoverer.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/referenceProcessorStats.hpp"
#include "memory/referenceType.hpp"
#include "oops/instanceRefKlass.hpp"

class GCTimer;

// The ReferenceProcessor class encapsulates the per-"collector" processing
// of java.lang.Reference objects for GC. The interface is useful for supporting
// a generational abstraction, in particular when there are multiple
// generations that are being independently collected -- possibly
// concurrently and/or incrementally.
// The class abstracts away from a generational setting
// by using a closure that determines whether a given reference or referent is
// subject to this ReferenceProcessor's discovery. This allows its use in a
// straightforward manner in a general, non-generational setting, or one with
// non-contiguous generations (or heaps).

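// An illustrative sketch (not part of this interface): a collector whose
// collection set occupies a single contiguous region might wire up discovery
// roughly as follows, using the SpanSubjectToDiscoveryClosure declared
// further below ("young_span" and the thread-degree arguments are
// hypothetical):
//
//   SpanSubjectToDiscoveryClosure _subject(young_span);
//   ReferenceProcessor* rp =
//     new ReferenceProcessor(&_subject,
//                            true,  ParallelGCThreads,  // MT processing
//                            true,  ParallelGCThreads,  // MT discovery
//                            false,                     // non-atomic (concurrent) discovery
//                            NULL);                     // no is_alive_non_header closure
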
// forward references
class ReferencePolicy;
class AbstractRefProcTaskExecutor;

// List of discovered references.
class DiscoveredList {
public:
  DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
  inline oop head() const;
  HeapWord* adr_head() {
    return UseCompressedOops ? (HeapWord*)&_compressed_head :
                               (HeapWord*)&_oop_head;
  }
  inline void set_head(oop o);
  inline bool is_empty() const;
  size_t length()               { return _len; }
  void   set_length(size_t len) { _len = len;  }
  void   inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
  void   dec_length(size_t dec) { _len -= dec; }
  // ...

  // Statistics
  NOT_PRODUCT(
  inline size_t processed() const { return _processed; }
  inline size_t removed() const   { return _removed; }
  )

  inline void move_to_next() {
    if (_ref == _next) {
      // End of the list.
      _ref = NULL;
    } else {
      _ref = _next;
    }
    assert(_ref != _first_seen, "cyclic ref_list found");
    NOT_PRODUCT(_processed++);
  }
};
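
// In HotSpot these iteration members belong to the list iterator
// (DiscoveredListIterator), whose declaration head lies in the portion
// elided above. A typical traversal, sketched with the has_next()/load_ptrs()
// accessors from that elided portion:
//
//   DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
//   while (iter.has_next()) {
//     iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
//     // ... examine or update the current reference ...
//     iter.move_to_next();
//   }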

class ReferenceProcessor : public ReferenceDiscoverer {
  size_t total_count(DiscoveredList lists[]) const;

  // The SoftReference master timestamp clock
  static jlong _soft_ref_timestamp_clock;

  BoolObjectClosure* _is_subject_to_discovery; // determines whether a given oop is subject
                                               // to this ReferenceProcessor's discovery
                                               // (and further processing).

  bool        _discovering_refs;        // true when discovery enabled
  bool        _discovery_is_atomic;     // if discovery is atomic wrt
                                        // other collectors in configuration
  bool        _discovery_is_mt;         // true if reference discovery is MT.

  bool        _enqueuing_is_done;       // true if all weak references enqueued
  bool        _processing_is_mt;        // true during phases when
                                        // reference processing is MT.
  uint        _next_id;                 // round-robin mod _num_q counter in
                                        // support of work distribution

  // For collectors that do not keep GC liveness information
  // in the object header, this field holds a closure that
  // helps the reference processor determine the reachability
  // of an oop. It is currently initialized to NULL for all
  // collectors except for CMS and G1.
  BoolObjectClosure* _is_alive_non_header;
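  // (G1 and CMS, for instance, install such a closure so that these liveness
  //  queries consult their concurrent marking information.)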

  // Soft ref clearing policies
  // ...

  // Process references with a certain reachability level.
  void process_discovered_reflist(DiscoveredList                refs_lists[],
                                  ReferencePolicy*              policy,
                                  bool                          clear_referent,
                                  BoolObjectClosure*            is_alive,
                                  OopClosure*                   keep_alive,
                                  VoidClosure*                  complete_gc,
                                  AbstractRefProcTaskExecutor*  task_executor,
                                  ReferenceProcessorPhaseTimes* phase_times);

  // Work methods used by the method process_discovered_reflist
  // Phase1: keep alive all those referents that are otherwise
  // dead but which must be kept alive by policy (and their closure).
  void process_phase1(DiscoveredList&    refs_list,
                      ReferencePolicy*   policy,
                      BoolObjectClosure* is_alive,
                      OopClosure*        keep_alive,
                      VoidClosure*       complete_gc);
  // Phase2: remove all those references whose referents are
  // reachable.
  void process_phase2(DiscoveredList&    refs_list,
                      BoolObjectClosure* is_alive,
                      OopClosure*        keep_alive,
                      VoidClosure*       complete_gc);
  // Work methods in support of process_phase2
  void pp2_work(DiscoveredList&    refs_list,
                BoolObjectClosure* is_alive,
                OopClosure*        keep_alive);
  void pp2_work_concurrent_discovery(
                DiscoveredList&    refs_list,
                BoolObjectClosure* is_alive,
                OopClosure*        keep_alive,
                VoidClosure*       complete_gc);
  // Phase3: process the referents by either clearing them
  // or keeping them alive (and their closure)
  void process_phase3(DiscoveredList&    refs_list,
                      bool               clear_referent,
                      BoolObjectClosure* is_alive,
                      OopClosure*        keep_alive,
                      VoidClosure*       complete_gc);

  // Enqueue references with a certain reachability level
  void enqueue_discovered_reflist(DiscoveredList& refs_list);

  // "Preclean" the discovered reference lists by removing references with
  // strongly reachable referents. The "is_alive" argument is a predicate on
  // an oop that indicates its (strong) reachability, and the "yield" closure
  // may be used to incrementalize or abort the precleaning process.
  // The caller is responsible for taking care of potential
  // interference with concurrent operations on these lists
  // (or predicates involved) by other threads. Currently
  // only used by the CMS collector.
  void preclean_discovered_references(BoolObjectClosure* is_alive,
                                      OopClosure*        keep_alive,
                                      VoidClosure*       complete_gc,
                                      YieldClosure*      yield,
                                      GCTimer*           gc_timer);

  // Returns the name of the discovered reference list
  // occupying slot i (of _num_q).
  const char* list_name(uint i);

  void enqueue_discovered_reflists(AbstractRefProcTaskExecutor*  task_executor,
                                   ReferenceProcessorPhaseTimes* phase_times);

  // "Preclean" the given discovered reference list
  // by removing references with strongly reachable referents.
  // Currently used in support of CMS only.
  void preclean_discovered_reflist(DiscoveredList&    refs_list,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc,
                                   YieldClosure*      yield);
private:
  // round-robin mod _num_q (note: _not_ mod _max_num_q)
  uint next_id() {
    uint id = _next_id;
    assert(!_discovery_is_mt, "Round robin should only be used in serial discovery");
    if (++_next_id == _num_q) {
      _next_id = 0;
    }
    assert(_next_id < _num_q, "_next_id %u _num_q %u _max_num_q %u", _next_id, _num_q, _max_num_q);
    return id;
  }
  DiscoveredList* get_discovered_list(ReferenceType rt);
  inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj,
                                        HeapWord* discovered_addr);

  void clear_discovered_references(DiscoveredList& refs_list);

  void log_reflist_counts(DiscoveredList ref_lists[], uint active_length, size_t total_count) PRODUCT_RETURN;

  // Balances reference queues.
  void balance_queues(DiscoveredList ref_lists[]);

  // Update (advance) the soft ref master clock field.
  void update_soft_ref_master_clock();

  bool is_subject_to_discovery(oop const obj) const;

public:
  // Default parameters give you a vanilla reference processor.
  ReferenceProcessor(BoolObjectClosure* is_subject_to_discovery,
                     bool mt_processing = false, uint mt_processing_degree = 1,
                     bool mt_discovery = false, uint mt_discovery_degree = 1,
                     bool atomic_discovery = true,
                     BoolObjectClosure* is_alive_non_header = NULL);

  // RefDiscoveryPolicy values
  enum DiscoveryPolicy {
    ReferenceBasedDiscovery = 0,
    ReferentBasedDiscovery = 1,
    DiscoveryPolicyMin = ReferenceBasedDiscovery,
    DiscoveryPolicyMax = ReferentBasedDiscovery
  };
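
  // Orienting note: ReferenceBasedDiscovery bases the discovery decision on
  // whether the Reference object itself is subject to discovery, while
  // ReferentBasedDiscovery bases it on the location of the referent. These
  // values back the -XX:RefDiscoveryPolicy flag.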

  static void init_statics();

public:
  // get and set "is_alive_non_header" field
  BoolObjectClosure* is_alive_non_header() {
    return _is_alive_non_header;
  }
  void set_is_alive_non_header(BoolObjectClosure* is_alive_non_header) {
    _is_alive_non_header = is_alive_non_header;
  }

  BoolObjectClosure* is_subject_to_discovery_closure() const { return _is_subject_to_discovery; }
  void set_is_subject_to_discovery_closure(BoolObjectClosure* cl) { _is_subject_to_discovery = cl; }

  // start and stop weak ref discovery
  void enable_discovery(bool check_no_refs = true);
  void disable_discovery()  { _discovering_refs = false; }
  bool discovery_enabled()  { return _discovering_refs;  }

  // whether discovery is atomic wrt other collectors
  bool discovery_is_atomic() const { return _discovery_is_atomic; }
  void set_atomic_discovery(bool atomic) { _discovery_is_atomic = atomic; }

  // whether discovery is done by multiple threads simultaneously
  bool discovery_is_mt() const { return _discovery_is_mt; }
  void set_mt_discovery(bool mt) { _discovery_is_mt = mt; }

  // Whether we are in a phase when _processing_ is MT.
  bool processing_is_mt() const { return _processing_is_mt; }
  void set_mt_processing(bool mt) { _processing_is_mt = mt; }

  // whether all enqueueing of weak references is complete
  bool enqueuing_is_done()  { return _enqueuing_is_done; }

  // ...

  // Process references found during GC (called by the garbage collector).
  ReferenceProcessorStats
  process_discovered_references(BoolObjectClosure*            is_alive,
                                OopClosure*                   keep_alive,
                                VoidClosure*                  complete_gc,
                                AbstractRefProcTaskExecutor*  task_executor,
                                ReferenceProcessorPhaseTimes* phase_times);

  // Enqueue references at end of GC (called by the garbage collector)
  void enqueue_discovered_references(AbstractRefProcTaskExecutor*  task_executor,
                                     ReferenceProcessorPhaseTimes* phase_times);

  // If a discovery is in process that is being superseded, abandon it: all
  // the discovered lists will be empty, and all the objects on them will
  // have NULL discovered fields. Must be called only at a safepoint.
  void abandon_partial_discovery();

  size_t total_reference_count(ReferenceType rt) const;

  // debugging
  void verify_no_references_recorded() PRODUCT_RETURN;
  void verify_referent(oop obj) PRODUCT_RETURN;
};

// A subject-to-discovery closure that uses a single memory span to determine
// the area that is subject to discovery. Useful for collectors that have
// contiguous generations.
class SpanSubjectToDiscoveryClosure : public BoolObjectClosure {
  MemRegion _span;

public:
  SpanSubjectToDiscoveryClosure() : BoolObjectClosure(), _span() { }
  SpanSubjectToDiscoveryClosure(MemRegion span) : BoolObjectClosure(), _span(span) { }

  MemRegion span() const { return _span; }

  void set_span(MemRegion mr) {
    _span = mr;
  }

  virtual bool do_object_b(oop obj) {
    return _span.contains(obj);
  }
};
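
// Illustrative sketch ("gen" and "rp" are hypothetical names): confine
// discovery to one generation's reserved region.
//
//   SpanSubjectToDiscoveryClosure _subject(gen->reserved());
//   rp->set_is_subject_to_discovery_closure(&_subject);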

// A utility class to disable reference discovery in
// the scope that contains it, for the given ReferenceProcessor.
class NoRefDiscovery: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool _was_discovering_refs;
 public:
  NoRefDiscovery(ReferenceProcessor* rp) : _rp(rp) {
    _was_discovering_refs = _rp->discovery_enabled();
    if (_was_discovering_refs) {
      _rp->disable_discovery();
    }
  }

  ~NoRefDiscovery() {
    if (_was_discovering_refs) {
      _rp->enable_discovery(false /*check_no_refs*/);
    }
  }
};
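
// Typical use (sketch): suppress discovery across a block of work; the
// destructor restores the previous state.
//
//   {
//     NoRefDiscovery no_discovery(rp);
//     // ... work during which no new references may be discovered ...
//   }  // discovery re-enabled iff it was enabled on entry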

// A utility class to temporarily mutate the subject discovery closure of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorSubjectToDiscoveryMutator : StackObj {
  ReferenceProcessor* _rp;
  BoolObjectClosure*  _saved_cl;

public:
  ReferenceProcessorSubjectToDiscoveryMutator(ReferenceProcessor* rp, BoolObjectClosure* cl):
    _rp(rp) {
    _saved_cl = _rp->is_subject_to_discovery_closure();
    _rp->set_is_subject_to_discovery_closure(cl);
  }

  ~ReferenceProcessorSubjectToDiscoveryMutator() {
    _rp->set_is_subject_to_discovery_closure(_saved_cl);
  }
};

// A utility class to temporarily mutate the span of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorSpanMutator : StackObj {
  ReferenceProcessor* _rp;
  SpanSubjectToDiscoveryClosure _discoverer;
  BoolObjectClosure* _old_discoverer;

public:
  ReferenceProcessorSpanMutator(ReferenceProcessor* rp,
                                MemRegion span):
    _rp(rp),
    _discoverer(span),
    _old_discoverer(rp->is_subject_to_discovery_closure()) {

    rp->set_is_subject_to_discovery_closure(&_discoverer);
  }

  ~ReferenceProcessorSpanMutator() {
    _rp->set_is_subject_to_discovery_closure(_old_discoverer);
  }
};
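
// Usage sketch (names hypothetical): temporarily restrict discovery to
// "span"; the destructor reinstates the previous subject-to-discovery
// closure.
//
//   {
//     ReferenceProcessorSpanMutator rp_mut(rp, span);
//     // ... collection work confined to "span" ...
//   }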

// A utility class to temporarily change the MT'ness of
// reference discovery for the given ReferenceProcessor
// in the scope that contains it.
class ReferenceProcessorMTDiscoveryMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _saved_mt;

 public:
  ReferenceProcessorMTDiscoveryMutator(ReferenceProcessor* rp,
                                       bool                mt):
    _rp(rp) {
    _saved_mt = _rp->discovery_is_mt();
    _rp->set_mt_discovery(mt);
  }

  ~ReferenceProcessorMTDiscoveryMutator() {
    _rp->set_mt_discovery(_saved_mt);
  }
};

// A utility class to temporarily change the disposition
// of the "is_alive_non_header" closure field of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorIsAliveMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  BoolObjectClosure*  _saved_cl;

 public:
  ReferenceProcessorIsAliveMutator(ReferenceProcessor* rp,
                                   BoolObjectClosure*  cl):
    _rp(rp) {
    _saved_cl = _rp->is_alive_non_header();
    _rp->set_is_alive_non_header(cl);
  }

  ~ReferenceProcessorIsAliveMutator() {
    _rp->set_is_alive_non_header(_saved_cl);
  }