// The ReferenceProcessor class encapsulates the per-"collector" processing
// of java.lang.Reference objects for GC. The interface is useful for supporting
// a generational abstraction, in particular when there are multiple
// generations that are being independently collected -- possibly
// concurrently and/or incrementally. Note, however, that the
// ReferenceProcessor class abstracts away from a generational setting
// by using only a heap interval (called "span" below), thus allowing
// its use in a straightforward manner in a general, non-generational
// setting.
//
// The basic idea is that each ReferenceProcessor object concerns
// itself with ("weak") reference processing in a specific "span"
// of the heap of interest to a specific collector. Currently,
// the span is a convex (contiguous) interval of the heap but,
// efficiency apart, there seems to be no reason it couldn't be
// extended (with appropriate modifications) to any "non-convex interval".
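//
// For illustration, a minimal sketch (hypothetical collector code; the
// names "rp" and "young_span" are assumptions, not part of this file) of
// confining discovery to a single span:
//
//   ReferenceProcessor* rp = ...;   // processor owned by the collector
//   MemRegion young_span = ...;     // right-open [bottom, end) interval
//   rp->set_span(young_span);       // discover only refs inside this span
//   rp->enable_discovery(true /*verify_disabled*/, false /*check_no_refs*/);
//   // ... trace the heap; Reference objects found in the span are
//   // appended to the discovered lists ...
//   rp->disable_discovery();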

// forward references
class ReferencePolicy;
class AbstractRefProcTaskExecutor;

// List of discovered references.
class DiscoveredList {
 public:
  DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
  oop head() const {
    return UseCompressedOops ? oopDesc::decode_heap_oop(_compressed_head) :
                               _oop_head;
  }
  HeapWord* adr_head() {
    return UseCompressedOops ? (HeapWord*)&_compressed_head :
                               (HeapWord*)&_oop_head;
  }
  void set_head(oop o) {
    if (UseCompressedOops) {
      // Must compress the head ptr.
      _compressed_head = oopDesc::encode_heap_oop(o);
    } else {
      _oop_head = o;
    }
  }
  bool   is_empty() const       { return head() == NULL; }
  size_t length()               { return _len; }
  void   set_length(size_t len) { _len = len; }
  void   inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
  void   dec_length(size_t dec) { _len -= dec; }
 private:
  // Set value depending on UseCompressedOops. This could be a template class
  // but then we have to fix all the instantiations and declarations that use this class.
  oop       _oop_head;
  narrowOop _compressed_head;
  size_t    _len;
};
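
// A minimal sketch of the head encoding above (illustrative only; "list"
// and "ref" are hypothetical locals):
//
//   DiscoveredList list;
//   assert(list.is_empty(), "starts empty");
//   oop ref = ...;            // some discovered Reference object
//   list.set_head(ref);       // stored compressed iff UseCompressedOops
//   list.inc_length(1);
//   assert(list.head() == ref, "decode must round-trip the encode");
//
// adr_head() returns the address of whichever field is in use, so callers
// can write through it (e.g. with a CAS during MT discovery) without
// knowing the encoding.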

// Iterator for the list of discovered references.
class DiscoveredListIterator {
 private:
  DiscoveredList&    _refs_list;
  HeapWord*          _prev_next;
  oop                _prev;
  oop                _ref;
  HeapWord*          _discovered_addr;
  oop                _next;
  HeapWord*          _referent_addr;
  oop                _referent;
  OopClosure*        _keep_alive;
  BoolObjectClosure* _is_alive;

  DEBUG_ONLY(
  oop                _first_seen; // cyclic linked list check
  )

  NOT_PRODUCT(
  size_t             _processed;
  size_t             _removed;
  )

 public:
  inline DiscoveredListIterator(DiscoveredList&    refs_list,
                                OopClosure*        keep_alive,
                                BoolObjectClosure* is_alive):
    _refs_list(refs_list),
    _prev_next(refs_list.adr_head()),
    _prev(NULL),
    _ref(refs_list.head()),
#ifdef ASSERT
    _first_seen(refs_list.head()),
#endif
#ifndef PRODUCT
    _processed(0),
    _removed(0),
#endif
    _next(NULL),
    _keep_alive(keep_alive),
    _is_alive(is_alive)
  { }

  // End Of List.
  inline bool has_next() const { return _ref != NULL; }

  // Get oop to the Reference object.
  inline oop obj() const { return _ref; }

  // Get oop to the referent object.
  inline oop referent() const { return _referent; }

  // Returns true if referent is alive.
  inline bool is_referent_alive() const {
    return _is_alive->do_object_b(_referent);
  }

  // Loads data for the current reference.
  // The "allow_null_referent" argument tells us to allow for the possibility
  // of a NULL referent in the discovered Reference object. This typically
  // happens in the case of concurrent collectors that may have done the
  // discovery concurrently, or interleaved, with mutator execution.
  void load_ptrs(DEBUG_ONLY(bool allow_null_referent));

  // Move to the next discovered reference.
  inline void next() {
    _prev_next = _discovered_addr;
    _prev = _ref;
    move_to_next();
  }

  // Remove the current reference from the list.
  void remove();

  // Make the Reference object active again.
  void make_active();

  // Make the referent alive.
  inline void make_referent_alive() {
    if (UseCompressedOops) {
      _keep_alive->do_oop((narrowOop*)_referent_addr);
    } else {
      _keep_alive->do_oop((oop*)_referent_addr);
    }
  }

  // Update the discovered field.
  inline void update_discovered() {
    // First _prev_next ref actually points into DiscoveredList (gross).
    if (UseCompressedOops) {
      if (!oopDesc::is_null(*(narrowOop*)_prev_next)) {
        _keep_alive->do_oop((narrowOop*)_prev_next);
      }
    } else {
      if (!oopDesc::is_null(*(oop*)_prev_next)) {
        _keep_alive->do_oop((oop*)_prev_next);
      }
    }
  }

  // NULL out referent pointer.
  void clear_referent();

  // Statistics
  NOT_PRODUCT(
  inline size_t processed() const { return _processed; }
  inline size_t removed() const   { return _removed; }
  )

  inline void move_to_next() {
    if (_ref == _next) {
      // End of the list.
      _ref = NULL;
    } else {
      _ref = _next;
    }
    assert(_ref != _first_seen, "cyclic ref_list found");
    NOT_PRODUCT(_processed++);
  }
};
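
// The canonical traversal pattern, modeled on the processing phases in
// referenceProcessor.cpp (a sketch; refs_list and the closures are the
// caller's):
//
//   DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
//   while (iter.has_next()) {
//     iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
//     if (iter.is_referent_alive()) {
//       // Referent is reachable after all: unlink this Reference from
//       // the discovered list and keep its referent alive.
//       iter.remove();
//       iter.make_referent_alive();
//       iter.move_to_next();
//     } else {
//       iter.next();
//     }
//   }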

class ReferenceProcessor : public CHeapObj {
 protected:
  // Compatibility with pre-4965777 JDK's
  static bool _pending_list_uses_discovered_field;

  MemRegion   _span;                 // (right-open) interval of heap
                                     // subject to weak-ref discovery

  bool        _discovering_refs;     // true when discovery enabled
  bool        _discovery_is_atomic;  // if discovery is atomic wrt
                                     // other collectors in configuration
  bool        _discovery_is_mt;      // true if reference discovery is MT.

  // If true, setting "next" field of a discovered refs list requires
  // write barrier(s). (Must be true if used in a collector in which
  // elements of a discovered list may be moved during discovery: for
  // example, a collector like Garbage-First that moves objects during a
  // long-term concurrent marking phase that does weak reference
  // discovery.)
  bool        _discovered_list_needs_barrier;

  BarrierSet* _bs;                   // Cached copy of BarrierSet.
  bool        _enqueuing_is_done;    // true if all weak references enqueued
  bool        _processing_is_mt;     // true during phases when
                                     // reference processing is MT.
  int         _next_id;              // round-robin mod _num_q counter in
                                     // support of work distribution

  // For collectors that do not keep GC liveness information
  // in the object header, this field holds a closure that
  // helps the reference processor determine the reachability
  // of an oop. It is currently initialized to NULL for all
  // collectors except for CMS and G1.
  BoolObjectClosure* _is_alive_non_header;

  // Soft ref clearing policies
  // . the default policy
  static ReferencePolicy* _default_soft_ref_policy;
  // . the "clear all" policy
  static ReferencePolicy* _always_clear_soft_ref_policy;
  // . the current policy below is either one of the above
  ReferencePolicy*        _current_soft_ref_policy;

  // The discovered ref lists themselves

  // The active MT'ness degree of the queues below
  int             _num_q;
  // The maximum MT'ness degree of the queues below
  int             _max_num_q;
  // Arrays of lists of oops, one per thread
  DiscoveredList* _discoveredSoftRefs;
  DiscoveredList* _discoveredWeakRefs;
  DiscoveredList* _discoveredFinalRefs;
  DiscoveredList* _discoveredPhantomRefs;

 public:
  static int number_of_subclasses_of_ref() { return (REF_PHANTOM - REF_OTHER); }

  int num_q()                            { return _num_q; }
  int max_num_q()                        { return _max_num_q; }
  void set_active_mt_degree(int v)       { _num_q = v; }
  DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; }

  ReferencePolicy* setup_policy(bool always_clear) {
    _current_soft_ref_policy = always_clear ?
      _always_clear_soft_ref_policy : _default_soft_ref_policy;
    _current_soft_ref_policy->setup();   // snapshot the policy threshold
    return _current_soft_ref_policy;
  }
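  // A sketch of intended use (hypothetical caller; the clear-all decision
  // typically comes from heap policy, e.g. when nearly out of memory):
  //
  //   bool clear_all = heap->collector_policy()->should_clear_all_soft_refs();
  //   ReferencePolicy* policy = rp->setup_policy(clear_all);
  //   // "policy" snapshots the soft-ref clearing threshold for this
  //   // collection and is later passed to process_discovered_reflist().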

  // Process references with a certain reachability level.
  void process_discovered_reflist(DiscoveredList               refs_lists[],
                                  ReferencePolicy*             policy,
                                  bool                         clear_referent,
                                  BoolObjectClosure*           is_alive,
                                  OopClosure*                  keep_alive,
                                  VoidClosure*                 complete_gc,
                                  AbstractRefProcTaskExecutor* task_executor);

  void process_phaseJNI(BoolObjectClosure* is_alive,
                        OopClosure*        keep_alive,
                        VoidClosure*       complete_gc);

  // Delete entries in the discovered lists that have
  // either a null referent or are not active. Such
  // Reference objects can result from the clearing
  // or enqueueing of Reference objects concurrently
  // with their discovery by a (concurrent) collector.
  // For a definition of "active", see java.lang.ref.Reference;
  // Refs are born active, become inactive when enqueued,
  // and never become active again. The state of being
  // active is encoded as follows: a Ref is active
  // if and only if its "next" field is NULL.
  void clean_up_discovered_references();
  void clean_up_discovered_reflist(DiscoveredList& refs_list);
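  // Illustrative encoding check (a sketch; java_lang_ref_Reference is the
  // VM-side accessor for java.lang.ref.Reference fields):
  //
  //   bool is_active = (java_lang_ref_Reference::next(ref) == NULL);
  //
  // Inactive (already cleared or enqueued) References found on a
  // discovered list are exactly what clean_up_discovered_references()
  // unlinks.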

  // Returns the name of the discovered reference list
  // occupying the i / _num_q slot.
  const char* list_name(int i);

  void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);

 protected:
  // Set the 'discovered' field of the given reference to
  // the given value - emitting barriers depending upon
  // the value of _discovered_list_needs_barrier.
  void set_discovered(oop ref, oop value);

  // "Preclean" the given discovered reference list
  // by removing references with strongly reachable referents.
  // Currently used in support of CMS only.
  void preclean_discovered_reflist(DiscoveredList&    refs_list,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc,
                                   YieldClosure*      yield);

  // round-robin mod _num_q (note: _not_ mod _max_num_q)
  int next_id() {
    int id = _next_id;
    if (++_next_id == _num_q) {
      _next_id = 0;
    }
    return id;
  }
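
  // With _num_q == 3, successive next_id() calls yield 0, 1, 2, 0, 1, ...
  // so discovered references are dealt evenly onto the active queues.
  // A sketch of its use during single-threaded discovery with MT
  // processing (mirroring get_discovered_list):
  //
  //   DiscoveredList* list = &_discoveredWeakRefs[next_id()];
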
  DiscoveredList* get_discovered_list(ReferenceType rt);
  inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj,
                                        HeapWord* discovered_addr);

  enum DiscoveryPolicy {
    ReferenceBasedDiscovery = 0,
    ReferentBasedDiscovery  = 1,
    DiscoveryPolicyMin      = ReferenceBasedDiscovery,
    DiscoveryPolicyMax      = ReferentBasedDiscovery
  };

  static void init_statics();

 public:
  // get and set "is_alive_non_header" field
  BoolObjectClosure* is_alive_non_header() {
    return _is_alive_non_header;
  }
  void set_is_alive_non_header(BoolObjectClosure* is_alive_non_header) {
    _is_alive_non_header = is_alive_non_header;
  }

  // get and set span
  MemRegion span()                   { return _span; }
  void      set_span(MemRegion span) { _span = span; }

  // start and stop weak ref discovery
  void enable_discovery(bool verify_disabled, bool check_no_refs) {
#ifdef ASSERT
    // Verify that we're not currently discovering refs
    assert(!verify_disabled || !_discovering_refs, "nested call?");

    if (check_no_refs) {
      // Verify that the discovered lists are empty
      verify_no_references_recorded();
    }
#endif // ASSERT
    _discovering_refs = true;
  }

  void disable_discovery() { _discovering_refs = false; }
  bool discovery_enabled() { return _discovering_refs; }

  // whether discovery is atomic wrt other collectors
  bool discovery_is_atomic() const { return _discovery_is_atomic; }
  void set_atomic_discovery(bool atomic) { _discovery_is_atomic = atomic; }

  // whether the JDK in which we are embedded is a pre-4965777 JDK,
  // and thus whether or not it uses the discovered field to chain
  // the entries in the pending list.
  static bool pending_list_uses_discovered_field() {
    return _pending_list_uses_discovered_field;
  }
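  // Background summary (not code from this file): pre-4965777 JDKs chain
  // the pending list through the Reference.next field, whereas later JDKs
  // chain it through the discovered field, leaving "next" free to encode
  // only the active/inactive state.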

  // whether discovery is done by multiple threads simultaneously
  bool discovery_is_mt() const { return _discovery_is_mt; }
  void set_mt_discovery(bool mt) { _discovery_is_mt = mt; }

  // Whether we are in a phase when _processing_ is MT.
  bool processing_is_mt() const { return _processing_is_mt; }

  // clear the discovered lists (unlinking each entry).
  void clear_discovered_references() PRODUCT_RETURN;
};

// A utility class to disable reference discovery in
// the scope which contains it, for the given ReferenceProcessor.
class NoRefDiscovery: StackObj {
 private:
  ReferenceProcessor* _rp;
  bool                _was_discovering_refs;
 public:
  NoRefDiscovery(ReferenceProcessor* rp) : _rp(rp) {
    _was_discovering_refs = _rp->discovery_enabled();
    if (_was_discovering_refs) {
      _rp->disable_discovery();
    }
  }

  ~NoRefDiscovery() {
    if (_was_discovering_refs) {
      _rp->enable_discovery(true /*verify_disabled*/, false /*check_no_refs*/);
    }
  }
};
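
// A minimal usage sketch (hypothetical caller):
//
//   {
//     NoRefDiscovery no_discovery(ref_processor);
//     // ... work during which no references may be discovered ...
//   } // prior discovery state restored here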


// A utility class to temporarily mutate the span of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorSpanMutator: StackObj {
 private:
  ReferenceProcessor* _rp;
  MemRegion           _saved_span;

 public:
  ReferenceProcessorSpanMutator(ReferenceProcessor* rp,
                                MemRegion span):
    _rp(rp) {
    _saved_span = _rp->span();
    _rp->set_span(span);
  }

  ~ReferenceProcessorSpanMutator() {
    _rp->set_span(_saved_span);
  }
};
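
// A minimal usage sketch (hypothetical collector code; "young_span" is an
// assumed name):
//
//   {
//     ReferenceProcessorSpanMutator x(ref_processor, young_span);
//     // ... collect, discovering references only within young_span ...
//   } // original span restored when x goes out of scope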