rev 2691 : [mq]: g1-reference-processing
--- old/src/share/vm/memory/referenceProcessor.hpp
+++ new/src/share/vm/memory/referenceProcessor.hpp
1 1 /*
2 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #ifndef SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP
26 26 #define SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP
27 27
28 28 #include "memory/referencePolicy.hpp"
29 29 #include "oops/instanceRefKlass.hpp"
30 30
31 31 // ReferenceProcessor class encapsulates the per-"collector" processing
32 32 // of java.lang.Reference objects for GC. The interface is useful for supporting
33 33 // a generational abstraction, in particular when there are multiple
34 34 // generations that are being independently collected -- possibly
35 35 // concurrently and/or incrementally. Note, however, that the
36 36 // ReferenceProcessor class abstracts away from a generational setting
37 37 // by using only a heap interval (called "span" below), thus allowing
38 38 // its use in a straightforward manner in a general, non-generational
39 39 // setting.
40 40 //
41 41 // The basic idea is that each ReferenceProcessor object concerns
42 42 // itself with ("weak") reference processing in a specific "span"
43 43 // of the heap of interest to a specific collector. Currently,
44 44 // the span is a convex interval of the heap, but, efficiency
45 45 // apart, there seems to be no reason it couldn't be extended
46 46 // (with appropriate modifications) to any "non-convex interval".
47 47
48 48 // forward references
49 49 class ReferencePolicy;
50 50 class AbstractRefProcTaskExecutor;
51 -class DiscoveredList;
51 +
52 +// List of discovered references.
53 +class DiscoveredList {
54 +public:
55 + DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
56 + oop head() const {
57 + return UseCompressedOops ? oopDesc::decode_heap_oop(_compressed_head) :
58 + _oop_head;
59 + }
60 + HeapWord* adr_head() {
61 + return UseCompressedOops ? (HeapWord*)&_compressed_head :
62 + (HeapWord*)&_oop_head;
63 + }
64 + void set_head(oop o) {
65 + if (UseCompressedOops) {
66 + // Must compress the head ptr.
67 + _compressed_head = oopDesc::encode_heap_oop(o);
68 + } else {
69 + _oop_head = o;
70 + }
71 + }
72 + bool is_empty() const { return head() == NULL; }
73 + size_t length() { return _len; }
74 + void set_length(size_t len) { _len = len; }
75 + void inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
76 + void dec_length(size_t dec) { _len -= dec; }
77 +private:
78 +  // Only one of the two head fields below is used, depending on UseCompressedOops.
79 +  // This could be a template class, but then we would have to fix all the instantiations and declarations that use it.
80 + oop _oop_head;
81 + narrowOop _compressed_head;
82 + size_t _len;
83 +};
84 +
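
[Annotation] To make the UseCompressedOops switch above concrete, here is a minimal standalone C++ analogue of the dual-width head. This is a sketch, not HotSpot code: a plain pointer and a 32-bit index stand in for oop and narrowOop, and Object, heap, and use_compressed are illustrative stand-ins for HotSpot's types and flag.

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    struct Object { Object* next; };

    static Object heap[1024];              // stand-in for the Java heap
    static bool   use_compressed = true;   // stand-in for UseCompressedOops

    // Compress a pointer into the pretend heap to a 32-bit value (0 == NULL).
    static uint32_t encode(Object* o) { return o ? uint32_t(o - heap) + 1 : 0; }
    static Object*  decode(uint32_t c) { return c ? heap + (c - 1) : nullptr; }

    class ListSketch {
    public:
      ListSketch() : _oop_head(nullptr), _compressed_head(0), _len(0) { }
      Object* head() const {
        return use_compressed ? decode(_compressed_head) : _oop_head;
      }
      void set_head(Object* o) {
        if (use_compressed) {
          _compressed_head = encode(o);   // must compress the head ptr
        } else {
          _oop_head = o;
        }
      }
      bool   is_empty() const     { return head() == nullptr; }
      size_t length() const       { return _len; }
      void   inc_length(size_t n) { _len += n; assert(_len > 0); }
      void   dec_length(size_t n) { _len -= n; }
    private:
      Object*  _oop_head;          // used when !use_compressed
      uint32_t _compressed_head;   // used when use_compressed
      size_t   _len;
    };
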
85 +// Iterator for the list of discovered references.
86 +class DiscoveredListIterator {
87 +private:
88 + DiscoveredList& _refs_list;
89 + HeapWord* _prev_next;
90 + oop _prev;
91 + oop _ref;
92 + HeapWord* _discovered_addr;
93 + oop _next;
94 + HeapWord* _referent_addr;
95 + oop _referent;
96 + OopClosure* _keep_alive;
97 + BoolObjectClosure* _is_alive;
98 +
99 + DEBUG_ONLY(
100 + oop _first_seen; // cyclic linked list check
101 + )
102 +
103 + NOT_PRODUCT(
104 + size_t _processed;
105 + size_t _removed;
106 + )
107 +
108 +public:
109 + inline DiscoveredListIterator(DiscoveredList& refs_list,
110 + OopClosure* keep_alive,
111 + BoolObjectClosure* is_alive):
112 + _refs_list(refs_list),
113 + _prev_next(refs_list.adr_head()),
114 + _prev(NULL),
115 + _ref(refs_list.head()),
116 +#ifdef ASSERT
117 + _first_seen(refs_list.head()),
118 +#endif
119 +#ifndef PRODUCT
120 + _processed(0),
121 + _removed(0),
122 +#endif
123 + _next(NULL),
124 + _keep_alive(keep_alive),
125 + _is_alive(is_alive)
126 +{ }
127 +
128 +  // Returns true while there are more references on the list; false at the end.
129 + inline bool has_next() const { return _ref != NULL; }
130 +
131 + // Get oop to the Reference object.
132 + inline oop obj() const { return _ref; }
133 +
134 + // Get oop to the referent object.
135 + inline oop referent() const { return _referent; }
136 +
137 + // Returns true if referent is alive.
138 + inline bool is_referent_alive() const {
139 + return _is_alive->do_object_b(_referent);
140 + }
141 +
142 + // Loads data for the current reference.
143 + // The "allow_null_referent" argument tells us to allow for the possibility
144 + // of a NULL referent in the discovered Reference object. This typically
145 + // happens in the case of concurrent collectors that may have done the
146 + // discovery concurrently, or interleaved, with mutator execution.
147 + void load_ptrs(DEBUG_ONLY(bool allow_null_referent));
148 +
149 + // Move to the next discovered reference.
150 + inline void next() {
151 + _prev_next = _discovered_addr;
152 + _prev = _ref;
153 + move_to_next();
154 + }
155 +
156 + // Remove the current reference from the list
157 + void remove();
158 +
159 + // Make the Reference object active again.
160 + void make_active();
161 +
162 + // Make the referent alive.
163 + inline void make_referent_alive() {
164 + if (UseCompressedOops) {
165 + _keep_alive->do_oop((narrowOop*)_referent_addr);
166 + } else {
167 + _keep_alive->do_oop((oop*)_referent_addr);
168 + }
169 + }
170 +
171 + // Update the discovered field.
172 + inline void update_discovered() {
173 + // First _prev_next ref actually points into DiscoveredList (gross).
174 + if (UseCompressedOops) {
175 + if (!oopDesc::is_null(*(narrowOop*)_prev_next)) {
176 + _keep_alive->do_oop((narrowOop*)_prev_next);
177 + }
178 + } else {
179 + if (!oopDesc::is_null(*(oop*)_prev_next)) {
180 + _keep_alive->do_oop((oop*)_prev_next);
181 + }
182 + }
183 + }
184 +
185 + // NULL out referent pointer.
186 + void clear_referent();
187 +
188 + // Statistics
189 + NOT_PRODUCT(
190 + inline size_t processed() const { return _processed; }
191 + inline size_t removed() const { return _removed; }
192 + )
193 +
194 + inline void move_to_next() {
195 + if (_ref == _next) {
196 + // End of the list.
197 + _ref = NULL;
198 + } else {
199 + _ref = _next;
200 + }
201 + assert(_ref != _first_seen, "cyclic ref_list found");
202 + NOT_PRODUCT(_processed++);
203 + }
204 +
205 +};
52 206
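
[Annotation] The iterator above is driven in a fixed pattern by each processing phase. Below is a sketch of that traversal protocol built only from the methods declared here (a hypothetical helper, not a verbatim excerpt from referenceProcessor.cpp). Note the asymmetry: after remove() the previous link is deliberately left where it was, so the loop advances with move_to_next() rather than next().

    // Sketch: walk one discovered list, unlinking each Reference whose
    // referent turns out to be strongly reachable (the phase-2 job).
    void phase2_sketch(DiscoveredList&    refs_list,
                       BoolObjectClosure* is_alive,
                       OopClosure*        keep_alive) {
      DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
      while (iter.has_next()) {
        iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
        if (iter.is_referent_alive()) {
          // Referent is reachable after all: drop this Reference from the
          // list and update the (possibly moved) referent pointer.
          iter.remove();
          iter.make_referent_alive();
          iter.move_to_next();   // _prev/_prev_next stay where remove() left them
        } else {
          iter.next();           // keep the entry; advance _prev as well
        }
      }
    }
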
53 207 class ReferenceProcessor : public CHeapObj {
54 208 protected:
55 209   // Compatibility with pre-4965777 JDKs
56 210 static bool _pending_list_uses_discovered_field;
57 - MemRegion _span; // (right-open) interval of heap
58 - // subject to wkref discovery
59 - bool _discovering_refs; // true when discovery enabled
60 - bool _discovery_is_atomic; // if discovery is atomic wrt
61 - // other collectors in configuration
62 - bool _discovery_is_mt; // true if reference discovery is MT.
211 +
212 + MemRegion _span; // (right-open) interval of heap
213 + // subject to wkref discovery
214 +
215 + bool _discovering_refs; // true when discovery enabled
216 + bool _discovery_is_atomic; // if discovery is atomic wrt
217 + // other collectors in configuration
218 + bool _discovery_is_mt; // true if reference discovery is MT.
219 +
63 220 // If true, setting "next" field of a discovered refs list requires
64 221 // write barrier(s). (Must be true if used in a collector in which
65 222 // elements of a discovered list may be moved during discovery: for
66 223 // example, a collector like Garbage-First that moves objects during a
67 224 // long-term concurrent marking phase that does weak reference
68 225 // discovery.)
69 226 bool _discovered_list_needs_barrier;
70 - BarrierSet* _bs; // Cached copy of BarrierSet.
71 - bool _enqueuing_is_done; // true if all weak references enqueued
72 - bool _processing_is_mt; // true during phases when
73 - // reference processing is MT.
74 - int _next_id; // round-robin mod _num_q counter in
75 - // support of work distribution
76 227
77 - // For collectors that do not keep GC marking information
228 + BarrierSet* _bs; // Cached copy of BarrierSet.
229 + bool _enqueuing_is_done; // true if all weak references enqueued
230 + bool _processing_is_mt; // true during phases when
231 + // reference processing is MT.
232 + int _next_id; // round-robin mod _num_q counter in
233 + // support of work distribution
234 +
235 + // For collectors that do not keep GC liveness information
78 236 // in the object header, this field holds a closure that
79 237 // helps the reference processor determine the reachability
80 - // of an oop (the field is currently initialized to NULL for
81 - // all collectors but the CMS collector).
238 + // of an oop. It is currently initialized to NULL for all
239 + // collectors except for CMS and G1.
82 240 BoolObjectClosure* _is_alive_non_header;
83 241
84 242 // Soft ref clearing policies
85 243 // . the default policy
86 244 static ReferencePolicy* _default_soft_ref_policy;
87 245 // . the "clear all" policy
88 246 static ReferencePolicy* _always_clear_soft_ref_policy;
89 247 // . the current policy below is either one of the above
90 248 ReferencePolicy* _current_soft_ref_policy;
91 249
92 250 // The discovered ref lists themselves
93 251
94 252 // The active MT'ness degree of the queues below
95 253 int _num_q;
96 254 // The maximum MT'ness degree of the queues below
97 255 int _max_num_q;
98 256 // Arrays of lists of oops, one per thread
99 257 DiscoveredList* _discoveredSoftRefs;
100 258 DiscoveredList* _discoveredWeakRefs;
101 259 DiscoveredList* _discoveredFinalRefs;
102 260 DiscoveredList* _discoveredPhantomRefs;
103 261
104 262 public:
105 - int num_q() { return _num_q; }
106 - int max_num_q() { return _max_num_q; }
107 - void set_active_mt_degree(int v) { _num_q = v; }
108 - DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; }
263 + static int number_of_subclasses_of_ref() { return (REF_PHANTOM - REF_OTHER); }
264 +
265 + int num_q() { return _num_q; }
266 + int max_num_q() { return _max_num_q; }
267 + void set_active_mt_degree(int v) { _num_q = v; }
268 + DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; }
269 +
109 270 ReferencePolicy* setup_policy(bool always_clear) {
110 271 _current_soft_ref_policy = always_clear ?
111 272 _always_clear_soft_ref_policy : _default_soft_ref_policy;
112 273 _current_soft_ref_policy->setup(); // snapshot the policy threshold
113 274 return _current_soft_ref_policy;
114 275 }
115 276
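
[Annotation] A typical call site for setup_policy(), as a hedged sketch: the clear_all_soft_refs flag and helper name are assumptions of this example, not part of this header. Collectors snapshot the soft-ref clearing policy once per cycle, using the "clear all" policy on memory-pressure or explicit collections and the default age-based policy otherwise.

    // Sketch: snapshot the soft-ref policy at the start of a GC cycle.
    void start_of_cycle_sketch(ReferenceProcessor* rp, bool clear_all_soft_refs) {
      ReferencePolicy* policy = rp->setup_policy(clear_all_soft_refs);
      // 'policy' is then passed to process_discovered_reflist() for the
      // soft-reference lists; the other reference types take no policy.
      (void)policy;
    }
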
116 277 // Process references with a certain reachability level.
117 278 void process_discovered_reflist(DiscoveredList refs_lists[],
118 279 ReferencePolicy* policy,
119 280 bool clear_referent,
120 281 BoolObjectClosure* is_alive,
121 282 OopClosure* keep_alive,
122 283 VoidClosure* complete_gc,
123 284 AbstractRefProcTaskExecutor* task_executor);
124 285
125 286 void process_phaseJNI(BoolObjectClosure* is_alive,
126 287 OopClosure* keep_alive,
127 288 VoidClosure* complete_gc);
128 289
129 290 // Work methods used by the method process_discovered_reflist
130 291 // Phase1: keep alive all those referents that are otherwise
131 292 // dead but which must be kept alive by policy (and their closure).
132 293 void process_phase1(DiscoveredList& refs_list,
133 294 ReferencePolicy* policy,
134 295 BoolObjectClosure* is_alive,
135 296 OopClosure* keep_alive,
136 297 VoidClosure* complete_gc);
137 298 // Phase2: remove all those references whose referents are
138 299 // reachable.
139 300 inline void process_phase2(DiscoveredList& refs_list,
140 301 BoolObjectClosure* is_alive,
141 302 OopClosure* keep_alive,
142 303 VoidClosure* complete_gc) {
143 304 if (discovery_is_atomic()) {
144 305 // complete_gc is ignored in this case for this phase
145 306 pp2_work(refs_list, is_alive, keep_alive);
146 307 } else {
147 308 assert(complete_gc != NULL, "Error");
148 309 pp2_work_concurrent_discovery(refs_list, is_alive,
149 310 keep_alive, complete_gc);
150 311 }
151 312 }
152 313 // Work methods in support of process_phase2
153 314 void pp2_work(DiscoveredList& refs_list,
154 315 BoolObjectClosure* is_alive,
155 316 OopClosure* keep_alive);
156 317 void pp2_work_concurrent_discovery(
157 318 DiscoveredList& refs_list,
158 319 BoolObjectClosure* is_alive,
159 320 OopClosure* keep_alive,
160 321 VoidClosure* complete_gc);
161 322 // Phase3: process the referents by either clearing them
162 323 // or keeping them alive (and their closure)
163 324 void process_phase3(DiscoveredList& refs_list,
164 325 bool clear_referent,
165 326 BoolObjectClosure* is_alive,
166 327 OopClosure* keep_alive,
167 328 VoidClosure* complete_gc);
168 329
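
[Annotation] How the three phases compose in the serial case, as a sketch over the declarations above (this is not the body of process_discovered_reflist; the MT path instead packages each phase as a ProcessTask for the executor, see below).

    // Sketch: serial processing of one reachability level. Phase 1 applies
    // only where a SoftReference clearing policy is in play.
    void process_one_list_sketch(ReferenceProcessor* rp,
                                 DiscoveredList&     refs_list,
                                 ReferencePolicy*    policy,   // NULL except for soft refs
                                 bool                clear_referent,
                                 BoolObjectClosure*  is_alive,
                                 OopClosure*         keep_alive,
                                 VoidClosure*        complete_gc) {
      if (policy != NULL) {
        rp->process_phase1(refs_list, policy, is_alive, keep_alive, complete_gc);
      }
      rp->process_phase2(refs_list, is_alive, keep_alive, complete_gc);
      rp->process_phase3(refs_list, clear_referent, is_alive, keep_alive, complete_gc);
    }
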
169 330 // Enqueue references with a certain reachability level
170 331 void enqueue_discovered_reflist(DiscoveredList& refs_list, HeapWord* pending_list_addr);
171 332
172 333 // "Preclean" all the discovered reference lists
173 334 // by removing references with strongly reachable referents.
174 335 // The first argument is a predicate on an oop that indicates
175 336 // its (strong) reachability and the second is a closure that
176 337 // may be used to incrementalize or abort the precleaning process.
177 338 // The caller is responsible for taking care of potential
178 339 // interference with concurrent operations on these lists
179 340 // (or predicates involved) by other threads. Currently
180 341 // only used by the CMS collector. should_unload_classes is
181 342 // used to aid assertion checking when classes are collected.
182 343 void preclean_discovered_references(BoolObjectClosure* is_alive,
183 344 OopClosure* keep_alive,
184 345 VoidClosure* complete_gc,
185 346 YieldClosure* yield,
186 347 bool should_unload_classes);
187 348
188 349 // Delete entries in the discovered lists that have
189 350 // either a null referent or are not active. Such
190 351 // Reference objects can result from the clearing
191 352 // or enqueueing of Reference objects concurrent
192 353 // with their discovery by a (concurrent) collector.
193 354 // For a definition of "active" see java.lang.ref.Reference;
194 355 // Refs are born active, become inactive when enqueued,
195 356 // and never become active again. The state of being
196 357 // active is encoded as follows: A Ref is active
197 358 // if and only if its "next" field is NULL.
198 359 void clean_up_discovered_references();
199 360 void clean_up_discovered_reflist(DiscoveredList& refs_list);
200 361
201 362 // Returns the name of the discovered reference list
202 363 // occupying the i / _num_q slot.
203 364 const char* list_name(int i);
204 365
205 366 void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);
206 367
207 368 protected:
369 + // Set the 'discovered' field of the given reference to
370 + // the given value - emitting barriers depending upon
371 + // the value of _discovered_list_needs_barrier.
372 + void set_discovered(oop ref, oop value);
373 +
208 374 // "Preclean" the given discovered reference list
209 375 // by removing references with strongly reachable referents.
210 376 // Currently used in support of CMS only.
211 377 void preclean_discovered_reflist(DiscoveredList& refs_list,
212 378 BoolObjectClosure* is_alive,
213 379 OopClosure* keep_alive,
214 380 VoidClosure* complete_gc,
215 381 YieldClosure* yield);
216 382
217 383   // round-robin mod _num_q (note: _not_ mod _max_num_q)
218 384 int next_id() {
219 385 int id = _next_id;
220 386 if (++_next_id == _num_q) {
221 387 _next_id = 0;
222 388 }
223 389 return id;
224 390 }
225 391 DiscoveredList* get_discovered_list(ReferenceType rt);
226 392 inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj,
227 393 HeapWord* discovered_addr);
228 394 void verify_ok_to_handle_reflists() PRODUCT_RETURN;
229 395
230 396 void clear_discovered_references(DiscoveredList& refs_list);
231 397 void abandon_partial_discovered_list(DiscoveredList& refs_list);
232 398
233 399   // Calculate the number of JNI handles.
234 400 unsigned int count_jni_refs();
235 401
236 402 // Balances reference queues.
237 403 void balance_queues(DiscoveredList ref_lists[]);
238 404
239 405 // Update (advance) the soft ref master clock field.
240 406 void update_soft_ref_master_clock();
241 407
242 408 public:
243 409 // constructor
244 410 ReferenceProcessor():
245 411 _span((HeapWord*)NULL, (HeapWord*)NULL),
246 412 _discoveredSoftRefs(NULL), _discoveredWeakRefs(NULL),
247 413 _discoveredFinalRefs(NULL), _discoveredPhantomRefs(NULL),
248 414 _discovering_refs(false),
249 415 _discovery_is_atomic(true),
250 416 _enqueuing_is_done(false),
251 417 _discovery_is_mt(false),
252 418 _discovered_list_needs_barrier(false),
253 419 _bs(NULL),
254 420 _is_alive_non_header(NULL),
255 421 _num_q(0),
256 422 _max_num_q(0),
257 423 _processing_is_mt(false),
258 424 _next_id(0)
259 425 { }
260 426
261 427 // Default parameters give you a vanilla reference processor.
262 428 ReferenceProcessor(MemRegion span,
263 429 bool mt_processing = false, int mt_processing_degree = 1,
264 430 bool mt_discovery = false, int mt_discovery_degree = 1,
265 431 bool atomic_discovery = true,
266 432 BoolObjectClosure* is_alive_non_header = NULL,
267 433 bool discovered_list_needs_barrier = false);
268 434
269 435 // RefDiscoveryPolicy values
270 436 enum DiscoveryPolicy {
271 437 ReferenceBasedDiscovery = 0,
272 438 ReferentBasedDiscovery = 1,
273 439 DiscoveryPolicyMin = ReferenceBasedDiscovery,
274 440 DiscoveryPolicyMax = ReferentBasedDiscovery
275 441 };
276 442
277 443 static void init_statics();
278 444
279 445 public:
280 446 // get and set "is_alive_non_header" field
281 447 BoolObjectClosure* is_alive_non_header() {
282 448 return _is_alive_non_header;
283 449 }
284 450 void set_is_alive_non_header(BoolObjectClosure* is_alive_non_header) {
285 451 _is_alive_non_header = is_alive_non_header;
286 452 }
287 453
288 454 // get and set span
289 455 MemRegion span() { return _span; }
290 456 void set_span(MemRegion span) { _span = span; }
291 457
292 458 // start and stop weak ref discovery
293 - void enable_discovery() { _discovering_refs = true; }
459 + void enable_discovery(bool verify_disabled, bool check_no_refs) {
460 +#ifdef ASSERT
461 + // Verify that we're not currently discovering refs
462 + assert(!verify_disabled || !_discovering_refs, "nested call?");
463 +
464 + if (check_no_refs) {
465 + // Verify that the discovered lists are empty
466 + verify_no_references_recorded();
467 + }
468 +#endif // ASSERT
469 + _discovering_refs = true;
470 + }
471 +
294 472 void disable_discovery() { _discovering_refs = false; }
295 473 bool discovery_enabled() { return _discovering_refs; }
296 474
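
[Annotation] Illustrative call sites for the new two-flag enable_discovery() (hypothetical, not taken from this patch): a stop-the-world collector can afford both debug checks at the start of a cycle, while re-enabling after a scoped disable skips the emptiness check, exactly as the NoRefDiscovery destructor below does.

    // Hypothetical helper: how the two flags are expected to be combined
    // at different points in a collection cycle.
    void enable_discovery_sketch(ReferenceProcessor* rp, bool at_start_of_gc) {
      if (at_start_of_gc) {
        // Start of a collection: expect discovery off and all lists empty.
        rp->enable_discovery(true /* verify_disabled */, true /* check_no_refs */);
      } else {
        // Re-enabling mid-cycle: lists may legitimately hold entries already.
        rp->enable_discovery(true /* verify_disabled */, false /* check_no_refs */);
      }
    }
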
297 475 // whether discovery is atomic wrt other collectors
298 476 bool discovery_is_atomic() const { return _discovery_is_atomic; }
299 477 void set_atomic_discovery(bool atomic) { _discovery_is_atomic = atomic; }
300 478
301 479 // whether the JDK in which we are embedded is a pre-4965777 JDK,
302 480 // and thus whether or not it uses the discovered field to chain
303 481 // the entries in the pending list.
304 482 static bool pending_list_uses_discovered_field() {
305 483 return _pending_list_uses_discovered_field;
306 484 }
307 485
308 486   // whether discovery is done by multiple threads simultaneously
309 487 bool discovery_is_mt() const { return _discovery_is_mt; }
310 488 void set_mt_discovery(bool mt) { _discovery_is_mt = mt; }
311 489
312 490 // Whether we are in a phase when _processing_ is MT.
313 491 bool processing_is_mt() const { return _processing_is_mt; }
314 492 void set_mt_processing(bool mt) { _processing_is_mt = mt; }
315 493
316 494 // whether all enqueuing of weak references is complete
317 495 bool enqueuing_is_done() { return _enqueuing_is_done; }
318 496 void set_enqueuing_is_done(bool v) { _enqueuing_is_done = v; }
319 497
320 498 // iterate over oops
321 499 void weak_oops_do(OopClosure* f); // weak roots
322 500
323 501 // Balance each of the discovered lists.
324 502 void balance_all_queues();
325 503
326 504 // Discover a Reference object, using appropriate discovery criteria
327 505 bool discover_reference(oop obj, ReferenceType rt);
328 506
329 507 // Process references found during GC (called by the garbage collector)
330 508 void process_discovered_references(BoolObjectClosure* is_alive,
331 509 OopClosure* keep_alive,
332 510 VoidClosure* complete_gc,
333 511 AbstractRefProcTaskExecutor* task_executor);
334 512
335 513 public:
336 514 // Enqueue references at end of GC (called by the garbage collector)
337 515 bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL);
338 516
339 517   // If a discovery is in progress that is being superseded, abandon it: all
340 518 // the discovered lists will be empty, and all the objects on them will
341 519 // have NULL discovered fields. Must be called only at a safepoint.
342 520 void abandon_partial_discovery();
343 521
344 522 // debugging
345 523 void verify_no_references_recorded() PRODUCT_RETURN;
346 524 void verify_referent(oop obj) PRODUCT_RETURN;
347 525
348 526 // clear the discovered lists (unlinking each entry).
349 527 void clear_discovered_references() PRODUCT_RETURN;
350 528 };
351 529
352 530 // A utility class to disable reference discovery in
353 531 // the scope that contains it, for the given ReferenceProcessor.
354 532 class NoRefDiscovery: StackObj {
355 533 private:
356 534 ReferenceProcessor* _rp;
357 535 bool _was_discovering_refs;
358 536 public:
359 537 NoRefDiscovery(ReferenceProcessor* rp) : _rp(rp) {
360 538 _was_discovering_refs = _rp->discovery_enabled();
361 539 if (_was_discovering_refs) {
362 540 _rp->disable_discovery();
363 541 }
364 542 }
365 543
366 544 ~NoRefDiscovery() {
367 545 if (_was_discovering_refs) {
368 - _rp->enable_discovery();
546 + _rp->enable_discovery(true /*verify_disabled*/, false /*check_no_refs*/);
369 547 }
370 548 }
371 549 };
372 550
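
[Annotation] The utility classes that follow all implement the same RAII pattern: the constructor saves and changes one piece of ReferenceProcessor state, and the destructor restores it on scope exit. A usage sketch for NoRefDiscovery (rp is an assumed ReferenceProcessor*):

    void no_discovery_scope_sketch(ReferenceProcessor* rp) {
      NoRefDiscovery no_discovery(rp);
      // ... heap work during which no new References may be discovered ...
    }  // destructor re-enables discovery here, iff it was enabled on entry
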
373 551
374 552 // A utility class to temporarily mutate the span of the
375 553 // given ReferenceProcessor in the scope that contains it.
376 554 class ReferenceProcessorSpanMutator: StackObj {
377 555 private:
378 556 ReferenceProcessor* _rp;
379 557 MemRegion _saved_span;
380 558
381 559 public:
382 560 ReferenceProcessorSpanMutator(ReferenceProcessor* rp,
383 561 MemRegion span):
384 562 _rp(rp) {
385 563 _saved_span = _rp->span();
386 564 _rp->set_span(span);
387 565 }
388 566
389 567 ~ReferenceProcessorSpanMutator() {
390 568 _rp->set_span(_saved_span);
391 569 }
392 570 };
393 571
394 572 // A utility class to temporarily change the MT'ness of
395 573 // reference discovery for the given ReferenceProcessor
396 574 // in the scope that contains it.
397 575 class ReferenceProcessorMTDiscoveryMutator: StackObj {
398 576 private:
399 577 ReferenceProcessor* _rp;
400 578 bool _saved_mt;
401 579
402 580 public:
403 581 ReferenceProcessorMTDiscoveryMutator(ReferenceProcessor* rp,
404 582 bool mt):
405 583 _rp(rp) {
406 584 _saved_mt = _rp->discovery_is_mt();
407 585 _rp->set_mt_discovery(mt);
408 586 }
409 587
410 588 ~ReferenceProcessorMTDiscoveryMutator() {
411 589 _rp->set_mt_discovery(_saved_mt);
412 590 }
413 591 };
414 592
415 593
416 594 // A utility class to temporarily change the disposition
417 595 // of the "is_alive_non_header" closure field of the
418 596 // given ReferenceProcessor in the scope that contains it.
419 597 class ReferenceProcessorIsAliveMutator: StackObj {
420 598 private:
421 599 ReferenceProcessor* _rp;
422 600 BoolObjectClosure* _saved_cl;
423 601
424 602 public:
425 603 ReferenceProcessorIsAliveMutator(ReferenceProcessor* rp,
426 604 BoolObjectClosure* cl):
427 605 _rp(rp) {
428 606 _saved_cl = _rp->is_alive_non_header();
429 607 _rp->set_is_alive_non_header(cl);
430 608 }
431 609
432 610 ~ReferenceProcessorIsAliveMutator() {
433 611 _rp->set_is_alive_non_header(_saved_cl);
434 612 }
435 613 };
436 614
437 615 // A utility class to temporarily change the disposition
438 616 // of the "discovery_is_atomic" field of the
439 617 // given ReferenceProcessor in the scope that contains it.
440 618 class ReferenceProcessorAtomicMutator: StackObj {
441 619 private:
442 620 ReferenceProcessor* _rp;
443 621 bool _saved_atomic_discovery;
444 622
445 623 public:
446 624 ReferenceProcessorAtomicMutator(ReferenceProcessor* rp,
447 625 bool atomic):
448 626 _rp(rp) {
449 627 _saved_atomic_discovery = _rp->discovery_is_atomic();
450 628 _rp->set_atomic_discovery(atomic);
451 629 }
452 630
453 631 ~ReferenceProcessorAtomicMutator() {
454 632 _rp->set_atomic_discovery(_saved_atomic_discovery);
455 633 }
456 634 };
457 635
458 636
459 637 // A utility class to temporarily change the MT processing
460 638 // disposition of the given ReferenceProcessor instance
461 639 // in the scope that contains it.
462 640 class ReferenceProcessorMTProcMutator: StackObj {
463 641 private:
464 642 ReferenceProcessor* _rp;
465 643 bool _saved_mt;
466 644
467 645 public:
468 646 ReferenceProcessorMTProcMutator(ReferenceProcessor* rp,
469 647 bool mt):
470 648 _rp(rp) {
471 649 _saved_mt = _rp->processing_is_mt();
472 650 _rp->set_mt_processing(mt);
473 651 }
474 652
475 653 ~ReferenceProcessorMTProcMutator() {
476 654 _rp->set_mt_processing(_saved_mt);
477 655 }
478 656 };
479 657
480 658
481 659 // This class is an interface used to implement task execution for
482 660 // reference processing.
483 661 class AbstractRefProcTaskExecutor {
484 662 public:
485 663
486 664 // Abstract tasks to execute.
487 665 class ProcessTask;
488 666 class EnqueueTask;
489 667
490 668 // Executes a task using worker threads.
491 669 virtual void execute(ProcessTask& task) = 0;
492 670 virtual void execute(EnqueueTask& task) = 0;
493 671
494 672 // Switch to single threaded mode.
495 673 virtual void set_single_threaded_mode() { };
496 674 };
497 675
498 676 // Abstract reference processing task to execute.
499 677 class AbstractRefProcTaskExecutor::ProcessTask {
500 678 protected:
501 679 ProcessTask(ReferenceProcessor& ref_processor,
502 680 DiscoveredList refs_lists[],
503 681 bool marks_oops_alive)
504 682 : _ref_processor(ref_processor),
505 683 _refs_lists(refs_lists),
506 684 _marks_oops_alive(marks_oops_alive)
507 685 { }
508 686
509 687 public:
510 688 virtual void work(unsigned int work_id, BoolObjectClosure& is_alive,
511 689 OopClosure& keep_alive,
512 690 VoidClosure& complete_gc) = 0;
513 691
514 692 // Returns true if a task marks some oops as alive.
515 693 bool marks_oops_alive() const
516 694 { return _marks_oops_alive; }
517 695
518 696 protected:
519 697 ReferenceProcessor& _ref_processor;
520 698 DiscoveredList* _refs_lists;
521 699 const bool _marks_oops_alive;
522 700 };
523 701
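
[Annotation] A sketch of what a collector-side ProcessTask subclass looks like (class name is illustrative, not from this patch): the worker id selects this thread's slice of the per-thread list array, which is how processing parallelism maps onto _num_q.

    class Phase2TaskSketch : public AbstractRefProcTaskExecutor::ProcessTask {
    public:
      Phase2TaskSketch(ReferenceProcessor& rp, DiscoveredList lists[])
        : ProcessTask(rp, lists, false /* marks_oops_alive */) { }

      virtual void work(unsigned int work_id, BoolObjectClosure& is_alive,
                        OopClosure& keep_alive, VoidClosure& complete_gc) {
        // Each worker processes exactly one DiscoveredList from the array.
        _ref_processor.process_phase2(_refs_lists[work_id],
                                      &is_alive, &keep_alive, &complete_gc);
      }
    };
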
524 702 // Abstract reference processing task to execute.
525 703 class AbstractRefProcTaskExecutor::EnqueueTask {
526 704 protected:
527 705 EnqueueTask(ReferenceProcessor& ref_processor,
528 706 DiscoveredList refs_lists[],
529 707 HeapWord* pending_list_addr,
530 708 int n_queues)
531 709 : _ref_processor(ref_processor),
532 710 _refs_lists(refs_lists),
533 711 _pending_list_addr(pending_list_addr),
534 712 _n_queues(n_queues)
535 713 { }
536 714
537 715 public:
538 716 virtual void work(unsigned int work_id) = 0;
539 717
540 718 protected:
541 719 ReferenceProcessor& _ref_processor;
542 720 DiscoveredList* _refs_lists;
543 721 HeapWord* _pending_list_addr;
544 722 int _n_queues;
545 723 };
546 724
547 725 #endif // SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP