rev 2585 : [mq]: g1-reference-processing
--- old/src/share/vm/memory/referenceProcessor.cpp
+++ new/src/share/vm/memory/referenceProcessor.cpp
1 1 /*
2 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 26 #include "classfile/javaClasses.hpp"
27 27 #include "classfile/systemDictionary.hpp"
28 28 #include "gc_interface/collectedHeap.hpp"
29 29 #include "gc_interface/collectedHeap.inline.hpp"
30 30 #include "memory/referencePolicy.hpp"
31 31 #include "memory/referenceProcessor.hpp"
32 32 #include "oops/oop.inline.hpp"
33 33 #include "runtime/java.hpp"
34 34 #include "runtime/jniHandles.hpp"
35 35
36 36 ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
37 37 ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy = NULL;
38 38 oop ReferenceProcessor::_sentinelRef = NULL;
39 -const int subclasses_of_ref = REF_PHANTOM - REF_OTHER;
40 39
41 -// List of discovered references.
42 -class DiscoveredList {
43 -public:
44 - DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
45 - oop head() const {
46 - return UseCompressedOops ? oopDesc::decode_heap_oop_not_null(_compressed_head) :
47 - _oop_head;
48 - }
49 - HeapWord* adr_head() {
50 - return UseCompressedOops ? (HeapWord*)&_compressed_head :
51 - (HeapWord*)&_oop_head;
52 - }
53 - void set_head(oop o) {
54 - if (UseCompressedOops) {
55 - // Must compress the head ptr.
56 - _compressed_head = oopDesc::encode_heap_oop_not_null(o);
57 - } else {
58 - _oop_head = o;
59 - }
60 - }
61 - bool empty() const { return head() == ReferenceProcessor::sentinel_ref(); }
62 - size_t length() { return _len; }
63 - void set_length(size_t len) { _len = len; }
64 - void inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
65 - void dec_length(size_t dec) { _len -= dec; }
66 -private:
67 - // Set value depending on UseCompressedOops. This could be a template class
68 - // but then we have to fix all the instantiations and declarations that use this class.
69 - oop _oop_head;
70 - narrowOop _compressed_head;
71 - size_t _len;
72 -};
40 +bool DiscoveredList::is_empty() const {
41 + return head() == ReferenceProcessor::sentinel_ref();
42 +}
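
The DiscoveredList class body deleted above presumably moved into referenceProcessor.hpp, leaving only this out-of-line is_empty() definition here (it needs ReferenceProcessor::sentinel_ref(), which the header cannot see). A minimal sketch of the declaration this definition assumes, reconstructed from the deleted block; the actual header may differ:

    // Sketch only: reconstructed from the block deleted above.
    class DiscoveredList {
    public:
      DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
      oop head() const {
        return UseCompressedOops ? oopDesc::decode_heap_oop_not_null(_compressed_head)
                                 : _oop_head;
      }
      HeapWord* adr_head() {
        return UseCompressedOops ? (HeapWord*)&_compressed_head
                                 : (HeapWord*)&_oop_head;
      }
      void set_head(oop o) {
        if (UseCompressedOops) {
          _compressed_head = oopDesc::encode_heap_oop_not_null(o); // must compress
        } else {
          _oop_head = o;
        }
      }
      bool   is_empty() const;       // defined out-of-line above (was empty())
      size_t length()                { return _len; }
      void   set_length(size_t len)  { _len = len; }
      void   inc_length(size_t inc)  { _len += inc; assert(_len > 0, "Error"); }
      void   dec_length(size_t dec)  { _len -= dec; }
    private:
      // Which head field is used depends on UseCompressedOops.
      oop       _oop_head;
      narrowOop _compressed_head;
      size_t    _len;
    };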
73 43
74 44 void referenceProcessor_init() {
75 45 ReferenceProcessor::init_statics();
76 46 }
77 47
78 48 void ReferenceProcessor::init_statics() {
79 49 assert(_sentinelRef == NULL, "should be initialized precisely once");
80 50 EXCEPTION_MARK;
81 51 _sentinelRef = instanceKlass::cast(
82 52 SystemDictionary::Reference_klass())->
83 53 allocate_permanent_instance(THREAD);
84 54
85 55 // Initialize the master soft ref clock.
86 56 java_lang_ref_SoftReference::set_clock(os::javaTimeMillis());
87 57
88 58 if (HAS_PENDING_EXCEPTION) {
89 59 Handle ex(THREAD, PENDING_EXCEPTION);
90 60 vm_exit_during_initialization(ex);
91 61 }
92 62 assert(_sentinelRef != NULL && _sentinelRef->is_oop(),
93 63 "Just constructed it!");
94 64 _always_clear_soft_ref_policy = new AlwaysClearPolicy();
95 65 _default_soft_ref_policy = new COMPILER2_PRESENT(LRUMaxHeapPolicy())
96 66 NOT_COMPILER2(LRUCurrentHeapPolicy());
97 67 if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) {
98 68 vm_exit_during_initialization("Could not allocate reference policy object");
99 69 }
100 70 guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
101 71 RefDiscoveryPolicy == ReferentBasedDiscovery,
102 72 "Unrecongnized RefDiscoveryPolicy");
103 73 }
104 74
105 75 ReferenceProcessor::ReferenceProcessor(MemRegion span,
106 76 bool mt_processing,
107 77 int mt_processing_degree,
108 78 bool mt_discovery,
109 79 int mt_discovery_degree,
110 80 bool atomic_discovery,
111 81 BoolObjectClosure* is_alive_non_header,
112 82 bool discovered_list_needs_barrier) :
113 83 _discovering_refs(false),
114 84 _enqueuing_is_done(false),
115 85 _is_alive_non_header(is_alive_non_header),
116 86 _discovered_list_needs_barrier(discovered_list_needs_barrier),
117 87 _bs(NULL),
118 88 _processing_is_mt(mt_processing),
119 89 _next_id(0)
120 90 {
121 91 _span = span;
122 92 _discovery_is_atomic = atomic_discovery;
123 93 _discovery_is_mt = mt_discovery;
124 94 _num_q = MAX2(1, mt_processing_degree);
125 95 _max_num_q = MAX2(_num_q, mt_discovery_degree);
126 - _discoveredSoftRefs = NEW_C_HEAP_ARRAY(DiscoveredList, _max_num_q * subclasses_of_ref);
96 + _discoveredSoftRefs = NEW_C_HEAP_ARRAY(DiscoveredList, _max_num_q * subclasses_of_ref());
127 97 if (_discoveredSoftRefs == NULL) {
128 98 vm_exit_during_initialization("Could not allocate RefProc Array");
129 99 }
130 100 _discoveredWeakRefs = &_discoveredSoftRefs[_max_num_q];
131 101 _discoveredFinalRefs = &_discoveredWeakRefs[_max_num_q];
132 102 _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q];
133 103 assert(sentinel_ref() != NULL, "_sentinelRef is NULL");
134 104 // Initialize all entries to _sentinelRef
135 - for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
105 + for (int i = 0; i < _max_num_q * subclasses_of_ref(); i++) {
136 106 _discoveredSoftRefs[i].set_head(sentinel_ref());
137 107 _discoveredSoftRefs[i].set_length(0);
138 108 }
139 - // If we do barreirs, cache a copy of the barrier set.
109 + // If we do barriers, cache a copy of the barrier set.
140 110 if (discovered_list_needs_barrier) {
141 111 _bs = Universe::heap()->barrier_set();
142 112 }
143 113 setup_policy(false /* default soft ref policy */);
144 114 }
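
The file-scope constant subclasses_of_ref deleted above is replaced throughout by calls to subclasses_of_ref(); the accessor itself is not in this file, so it presumably moved into the ReferenceProcessor class in the header. A plausible sketch, assuming the same arithmetic as the deleted constant:

    // Assumed shape of the new accessor in referenceProcessor.hpp.
    // Counts the discovered java.lang.ref subclasses handled here:
    // Soft, Weak, Final and Phantom.
    static int subclasses_of_ref() { return (REF_PHANTOM - REF_OTHER); }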
145 115
146 116 #ifndef PRODUCT
147 117 void ReferenceProcessor::verify_no_references_recorded() {
148 118 guarantee(!_discovering_refs, "Discovering refs?");
149 - for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
150 - guarantee(_discoveredSoftRefs[i].empty(),
119 + for (int i = 0; i < _max_num_q * subclasses_of_ref(); i++) {
120 + guarantee(_discoveredSoftRefs[i].is_empty(),
151 121 "Found non-empty discovered list");
152 122 }
153 123 }
154 124 #endif
155 125
156 126 void ReferenceProcessor::weak_oops_do(OopClosure* f) {
157 127 // Should this instead be
158 - // for (int i = 0; i < subclasses_of_ref; i++_ {
128 + // for (int i = 0; i < subclasses_of_ref(); i++) {
159 129 // for (int j = 0; j < _num_q; j++) {
160 130 // int index = i * _max_num_q + j;
161 - for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
131 + for (int i = 0; i < _max_num_q * subclasses_of_ref(); i++) {
162 132 if (UseCompressedOops) {
163 133 f->do_oop((narrowOop*)_discoveredSoftRefs[i].adr_head());
164 134 } else {
165 135 f->do_oop((oop*)_discoveredSoftRefs[i].adr_head());
166 136 }
167 137 }
168 138 }
169 139
170 140 void ReferenceProcessor::oops_do(OopClosure* f) {
171 141 f->do_oop(adr_sentinel_ref());
172 142 }
173 143
174 144 void ReferenceProcessor::update_soft_ref_master_clock() {
175 145 // Update (advance) the soft ref master clock field. This must be done
176 146 // after processing the soft ref list.
177 147 jlong now = os::javaTimeMillis();
178 148 jlong clock = java_lang_ref_SoftReference::clock();
179 149 NOT_PRODUCT(
180 150 if (now < clock) {
181 151 warning("time warp: " INT64_FORMAT " to " INT64_FORMAT, clock, now);
182 152 }
183 153 )
184 154 // In product mode, protect ourselves from system time being adjusted
185 155 // externally and going backward; see note in the implementation of
186 156 // GenCollectedHeap::time_since_last_gc() for the right way to fix
187 157 // this uniformly throughout the VM; see bug-id 4741166. XXX
188 158 if (now > clock) {
189 159 java_lang_ref_SoftReference::set_clock(now);
190 160 }
191 161 // Else leave clock stalled at its old value until time progresses
192 162 // past clock value.
193 163 }
194 164
195 165 void ReferenceProcessor::process_discovered_references(
196 166 BoolObjectClosure* is_alive,
197 167 OopClosure* keep_alive,
198 168 VoidClosure* complete_gc,
199 169 AbstractRefProcTaskExecutor* task_executor) {
200 170 NOT_PRODUCT(verify_ok_to_handle_reflists());
201 171
202 172 assert(!enqueuing_is_done(), "If here enqueuing should not be complete");
203 173 // Stop treating discovered references specially.
204 174 disable_discovery();
205 175
206 176 bool trace_time = PrintGCDetails && PrintReferenceGC;
207 177 // Soft references
208 178 {
209 179 TraceTime tt("SoftReference", trace_time, false, gclog_or_tty);
210 180 process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true,
211 181 is_alive, keep_alive, complete_gc, task_executor);
212 182 }
213 183
214 184 update_soft_ref_master_clock();
215 185
216 186 // Weak references
217 187 {
218 188 TraceTime tt("WeakReference", trace_time, false, gclog_or_tty);
219 189 process_discovered_reflist(_discoveredWeakRefs, NULL, true,
220 190 is_alive, keep_alive, complete_gc, task_executor);
221 191 }
222 192
223 193 // Final references
224 194 {
225 195 TraceTime tt("FinalReference", trace_time, false, gclog_or_tty);
226 196 process_discovered_reflist(_discoveredFinalRefs, NULL, false,
227 197 is_alive, keep_alive, complete_gc, task_executor);
228 198 }
229 199
230 200 // Phantom references
231 201 {
232 202 TraceTime tt("PhantomReference", trace_time, false, gclog_or_tty);
233 203 process_discovered_reflist(_discoveredPhantomRefs, NULL, false,
234 204 is_alive, keep_alive, complete_gc, task_executor);
235 205 }
236 206
237 207 // Weak global JNI references. It would make more sense (semantically) to
238 208 // traverse these simultaneously with the regular weak references above, but
239 209 // that is not how the JDK 1.2 specification is written. See #4126360. Native code can
240 210 // thus use JNI weak references to circumvent the phantom references and
241 211 // resurrect a "post-mortem" object.
242 212 {
243 213 TraceTime tt("JNI Weak Reference", trace_time, false, gclog_or_tty);
244 214 if (task_executor != NULL) {
245 215 task_executor->set_single_threaded_mode();
246 216 }
247 217 process_phaseJNI(is_alive, keep_alive, complete_gc);
248 218 }
249 219 }
250 220
251 221 #ifndef PRODUCT
252 222 // Calculate the number of jni handles.
253 223 uint ReferenceProcessor::count_jni_refs() {
254 224 class AlwaysAliveClosure: public BoolObjectClosure {
255 225 public:
256 226 virtual bool do_object_b(oop obj) { return true; }
257 227 virtual void do_object(oop obj) { assert(false, "Don't call"); }
258 228 };
259 229
260 230 class CountHandleClosure: public OopClosure {
261 231 private:
262 232 int _count;
263 233 public:
264 234 CountHandleClosure(): _count(0) {}
265 235 void do_oop(oop* unused) { _count++; }
266 236 void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
267 237 int count() { return _count; }
268 238 };
269 239 CountHandleClosure global_handle_count;
270 240 AlwaysAliveClosure always_alive;
271 241 JNIHandles::weak_oops_do(&always_alive, &global_handle_count);
272 242 return global_handle_count.count();
273 243 }
274 244 #endif
275 245
276 246 void ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive,
277 247 OopClosure* keep_alive,
278 248 VoidClosure* complete_gc) {
279 249 #ifndef PRODUCT
280 250 if (PrintGCDetails && PrintReferenceGC) {
281 251 unsigned int count = count_jni_refs();
282 252 gclog_or_tty->print(", %u refs", count);
283 253 }
284 254 #endif
285 255 JNIHandles::weak_oops_do(is_alive, keep_alive);
286 256 // Finally remember to keep sentinel around
287 257 keep_alive->do_oop(adr_sentinel_ref());
288 258 complete_gc->do_void();
289 259 }
290 260
291 261
292 262 template <class T>
293 263 bool enqueue_discovered_ref_helper(ReferenceProcessor* ref,
294 264 AbstractRefProcTaskExecutor* task_executor) {
295 265
296 266 // Remember old value of pending references list
297 267 T* pending_list_addr = (T*)java_lang_ref_Reference::pending_list_addr();
298 268 T old_pending_list_value = *pending_list_addr;
299 269
300 270 // Enqueue references that are not made active again, and
301 271 // clear the decks for the next collection (cycle).
302 272 ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor);
303 273 // Do the oop-check on pending_list_addr missed in
304 274 // enqueue_discovered_reflist. We should probably
305 275 // do a raw oop_check so that future such idempotent
306 276 // oop_stores relying on the oop-check side-effect
307 277 // may be elided automatically and safely without
308 278 // affecting correctness.
309 279 oop_store(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr));
310 280
311 281 // Stop treating discovered references specially.
312 282 ref->disable_discovery();
313 283
314 284 // Return true if new pending references were added
315 285 return old_pending_list_value != *pending_list_addr;
316 286 }
317 287
318 288 bool ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) {
319 289 NOT_PRODUCT(verify_ok_to_handle_reflists());
320 290 if (UseCompressedOops) {
321 291 return enqueue_discovered_ref_helper<narrowOop>(this, task_executor);
322 292 } else {
323 293 return enqueue_discovered_ref_helper<oop>(this, task_executor);
324 294 }
325 295 }
326 296
327 297 void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
328 298 HeapWord* pending_list_addr) {
329 299 // Given a list of refs linked through the "discovered" field
330 300 // (java.lang.ref.Reference.discovered) chain them through the
331 301 // "next" field (java.lang.ref.Reference.next) and prepend
332 302 // to the pending list.
333 303 if (TraceReferenceGC && PrintGCDetails) {
334 304 gclog_or_tty->print_cr("ReferenceProcessor::enqueue_discovered_reflist list "
335 305 INTPTR_FORMAT, (address)refs_list.head());
336 306 }
337 307 oop obj = refs_list.head();
338 308 // Walk down the list, copying the discovered field into
339 309 // the next field and clearing it (except for the last
340 310 // non-sentinel object which is treated specially to avoid
341 311 // confusion with an active reference).
342 312 while (obj != sentinel_ref()) {
343 313 assert(obj->is_instanceRef(), "should be reference object");
344 314 oop next = java_lang_ref_Reference::discovered(obj);
345 315 if (TraceReferenceGC && PrintGCDetails) {
346 316 gclog_or_tty->print_cr(" obj " INTPTR_FORMAT "/next " INTPTR_FORMAT,
347 317 obj, next);
348 318 }
349 319 assert(java_lang_ref_Reference::next(obj) == NULL,
350 320 "The reference should not be enqueued");
351 321 if (next == sentinel_ref()) { // obj is last
352 322 // Swap refs_list into pending_list_addr and
353 323 // set obj's next to what we read from pending_list_addr.
354 324 oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
355 325 // Need oop_check on pending_list_addr above;
356 326 // see special oop-check code at the end of
357 327 // enqueue_discovered_reflists() further below.
358 328 if (old == NULL) {
359 329 // obj should be made to point to itself, since
360 330 // pending list was empty.
361 331 java_lang_ref_Reference::set_next(obj, obj);
362 332 } else {
363 333 java_lang_ref_Reference::set_next(obj, old);
364 334 }
365 335 } else {
366 336 java_lang_ref_Reference::set_next(obj, next);
367 337 }
368 338 java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
369 339 obj = next;
370 340 }
371 341 }
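
A worked trace of the loop above: for a discovered list A -> B -> sentinel and an existing pending list headed by P, the walk sets A.next = B, then, B being last, atomically swaps the list head A into pending_list_addr and sets B.next = P (or B.next = B, the self-loop, if the pending list was empty), clearing both discovered fields along the way; the pending list is now A -> B -> P.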
372 342
373 343 // Parallel enqueue task
374 344 class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask {
375 345 public:
376 346 RefProcEnqueueTask(ReferenceProcessor& ref_processor,
377 347 DiscoveredList discovered_refs[],
378 348 HeapWord* pending_list_addr,
379 349 oop sentinel_ref,
380 350 int n_queues)
381 351 : EnqueueTask(ref_processor, discovered_refs,
382 352 pending_list_addr, sentinel_ref, n_queues)
383 353 { }
384 354
385 355 virtual void work(unsigned int work_id) {
386 356 assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds");
387 357 // Simplest first cut: static partitioning.
388 358 int index = work_id;
389 359 // The increment on "index" must correspond to the maximum number of queues
390 360 // (n_queues) with which that ReferenceProcessor was created. That
391 361 // is because of the "clever" way the discovered references lists were
392 362 // allocated and are indexed into.
393 363 assert(_n_queues == (int) _ref_processor.max_num_q(), "Different number not expected");
394 364 for (int j = 0;
395 - j < subclasses_of_ref;
365 + j < ReferenceProcessor::subclasses_of_ref();
396 366 j++, index += _n_queues) {
397 367 _ref_processor.enqueue_discovered_reflist(
398 368 _refs_lists[index], _pending_list_addr);
399 369 _refs_lists[index].set_head(_sentinel_ref);
400 370 _refs_lists[index].set_length(0);
401 371 }
402 372 }
403 373 };
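
The striding in work() above depends on the flat layout the constructor set up: _discoveredSoftRefs is one array of _max_num_q * subclasses_of_ref() entries, with the Weak/Final/Phantom lists aliased at successive _max_num_q offsets. For example, assuming _max_num_q == 4:

    // Flat index layout (hypothetical _max_num_q == 4):
    //   [ 0.. 3] SoftRef     [ 4.. 7] WeakRef
    //   [ 8..11] FinalRef    [12..15] PhantomRef
    // Worker j therefore visits indices j, j+4, j+8, j+12, i.e. one queue
    // of each reference subclass, which is exactly "index += _n_queues".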
404 374
405 375 // Enqueue references that are not made active again
406 376 void ReferenceProcessor::enqueue_discovered_reflists(HeapWord* pending_list_addr,
407 377 AbstractRefProcTaskExecutor* task_executor) {
408 378 if (_processing_is_mt && task_executor != NULL) {
409 379 // Parallel code
410 380 RefProcEnqueueTask tsk(*this, _discoveredSoftRefs,
411 381 pending_list_addr, sentinel_ref(), _max_num_q);
412 382 task_executor->execute(tsk);
413 383 } else {
414 384 // Serial code: call the parent class's implementation
415 - for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
385 + for (int i = 0; i < _max_num_q * subclasses_of_ref(); i++) {
416 386 enqueue_discovered_reflist(_discoveredSoftRefs[i], pending_list_addr);
417 387 _discoveredSoftRefs[i].set_head(sentinel_ref());
418 388 _discoveredSoftRefs[i].set_length(0);
419 389 }
420 390 }
421 391 }
422 392
423 393 // Iterator for the list of discovered references.
424 394 class DiscoveredListIterator {
425 395 public:
426 396 inline DiscoveredListIterator(DiscoveredList& refs_list,
427 397 OopClosure* keep_alive,
428 398 BoolObjectClosure* is_alive);
429 399
430 400 // End Of List.
431 401 inline bool has_next() const { return _next != ReferenceProcessor::sentinel_ref(); }
432 402
433 403 // Get oop to the Reference object.
434 404 inline oop obj() const { return _ref; }
435 405
436 406 // Get oop to the referent object.
437 407 inline oop referent() const { return _referent; }
438 408
439 409 // Returns true if referent is alive.
440 410 inline bool is_referent_alive() const;
441 411
442 412 // Loads data for the current reference.
443 413 // The "allow_null_referent" argument tells us to allow for the possibility
444 414 // of a NULL referent in the discovered Reference object. This typically
445 415 // happens in the case of concurrent collectors that may have done the
446 416 // discovery concurrently, or interleaved, with mutator execution.
447 417 inline void load_ptrs(DEBUG_ONLY(bool allow_null_referent));
448 418
449 419 // Move to the next discovered reference.
450 420 inline void next();
451 421
452 422 // Remove the current reference from the list
453 423 inline void remove();
454 424
455 425 // Make the Reference object active again.
456 - inline void make_active() { java_lang_ref_Reference::set_next(_ref, NULL); }
426 + inline void make_active() {
427 + // For G1 we don't want to use set_next - it
428 + // will dirty the card for the next field of
429 + // the reference object and will fail
430 + // CT verification.
431 + if (UseG1GC) {
432 + BarrierSet* bs = oopDesc::bs();
433 + HeapWord* next_addr = java_lang_ref_Reference::next_addr(_ref);
434 +
435 + if (UseCompressedOops) {
436 + bs->write_ref_field_pre((narrowOop*)next_addr, NULL);
437 + } else {
438 + bs->write_ref_field_pre((oop*)next_addr, NULL);
439 + }
440 + java_lang_ref_Reference::set_next_raw(_ref, NULL);
441 + } else {
442 + java_lang_ref_Reference::set_next(_ref, NULL);
443 + }
444 + }
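
A note on the G1 branch above: write_ref_field_pre is the pre-barrier (for G1, the SATB barrier, which records the field's old value before the store), while set_next_raw is assumed to perform a plain oop store with no post-barrier, so no card is dirtied for the next field and card-table verification stays clean.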
457 445
458 446 // Make the referent alive.
459 447 inline void make_referent_alive() {
460 448 if (UseCompressedOops) {
461 449 _keep_alive->do_oop((narrowOop*)_referent_addr);
462 450 } else {
463 451 _keep_alive->do_oop((oop*)_referent_addr);
464 452 }
465 453 }
466 454
467 455 // Update the discovered field.
468 456 inline void update_discovered() {
469 457 // First _prev_next ref actually points into DiscoveredList (gross).
470 458 if (UseCompressedOops) {
471 459 _keep_alive->do_oop((narrowOop*)_prev_next);
472 460 } else {
473 461 _keep_alive->do_oop((oop*)_prev_next);
474 462 }
475 463 }
476 464
477 465 // NULL out referent pointer.
478 466 inline void clear_referent() { oop_store_raw(_referent_addr, NULL); }
479 467
480 468 // Statistics
481 469 NOT_PRODUCT(
482 470 inline size_t processed() const { return _processed; }
483 471 inline size_t removed() const { return _removed; }
484 472 )
485 473
486 474 inline void move_to_next();
487 475
488 476 private:
489 477 DiscoveredList& _refs_list;
490 478 HeapWord* _prev_next;
491 479 oop _ref;
492 480 HeapWord* _discovered_addr;
493 481 oop _next;
494 482 HeapWord* _referent_addr;
495 483 oop _referent;
496 484 OopClosure* _keep_alive;
497 485 BoolObjectClosure* _is_alive;
486 +
498 487 DEBUG_ONLY(
499 488 oop _first_seen; // cyclic linked list check
500 489 )
490 +
501 491 NOT_PRODUCT(
502 492 size_t _processed;
503 493 size_t _removed;
504 494 )
505 495 };
506 496
507 497 inline DiscoveredListIterator::DiscoveredListIterator(DiscoveredList& refs_list,
508 498 OopClosure* keep_alive,
509 - BoolObjectClosure* is_alive)
510 - : _refs_list(refs_list),
499 + BoolObjectClosure* is_alive) :
500 + _refs_list(refs_list),
511 501 _prev_next(refs_list.adr_head()),
512 502 _ref(refs_list.head()),
513 503 #ifdef ASSERT
514 504 _first_seen(refs_list.head()),
515 505 #endif
516 506 #ifndef PRODUCT
517 507 _processed(0),
518 508 _removed(0),
519 509 #endif
520 510 _next(refs_list.head()),
521 511 _keep_alive(keep_alive),
522 512 _is_alive(is_alive)
523 513 { }
524 514
525 515 inline bool DiscoveredListIterator::is_referent_alive() const {
526 516 return _is_alive->do_object_b(_referent);
527 517 }
528 518
529 519 inline void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
530 520 _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
531 521 oop discovered = java_lang_ref_Reference::discovered(_ref);
532 522 assert(_discovered_addr && discovered->is_oop_or_null(),
533 523 "discovered field is bad");
534 524 _next = discovered;
535 525 _referent_addr = java_lang_ref_Reference::referent_addr(_ref);
536 526 _referent = java_lang_ref_Reference::referent(_ref);
537 527 assert(Universe::heap()->is_in_reserved_or_null(_referent),
538 528 "Wrong oop found in java.lang.Reference object");
539 529 assert(allow_null_referent ?
540 530 _referent->is_oop_or_null()
541 531 : _referent->is_oop(),
542 532 "bad referent");
543 533 }
544 534
545 535 inline void DiscoveredListIterator::next() {
546 536 _prev_next = _discovered_addr;
547 537 move_to_next();
548 538 }
549 539
550 540 inline void DiscoveredListIterator::remove() {
551 541 assert(_ref->is_oop(), "Dropping a bad reference");
552 542 oop_store_raw(_discovered_addr, NULL);
553 543 // First _prev_next ref actually points into DiscoveredList (gross).
554 544 if (UseCompressedOops) {
555 545 // Remove Reference object from list.
556 546 oopDesc::encode_store_heap_oop_not_null((narrowOop*)_prev_next, _next);
557 547 } else {
558 548 // Remove Reference object from list.
559 549 oopDesc::store_heap_oop((oop*)_prev_next, _next);
560 550 }
561 551 NOT_PRODUCT(_removed++);
562 552 _refs_list.dec_length(1);
563 553 }
564 554
565 555 inline void DiscoveredListIterator::move_to_next() {
566 556 _ref = _next;
567 557 assert(_ref != _first_seen, "cyclic ref_list found");
568 558 NOT_PRODUCT(_processed++);
569 559 }
570 560
571 561 // NOTE: process_phase*() are largely similar, and at a high level
572 562 // merely iterate over the extant list applying a predicate to
573 563 // each of its elements and possibly removing that element from the
574 564 // list and applying some further closures to that element.
575 565 // We should consider the possibility of replacing these
576 566 // process_phase*() methods by abstracting them into
577 567 // a single general iterator invocation that receives appropriate
578 568 // closures that accomplish this work.
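
A rough sketch of what such a unification might look like, assuming the phase-specific logic can be passed in as a predicate plus a removal action (hypothetical helper, not part of this change):

    // Hypothetical only: illustrates the refactoring suggested above.
    // ShouldRemove decides whether the current Reference leaves the list;
    // OnRemove does the phase-specific work (e.g. make_referent_alive()).
    template <typename ShouldRemove, typename OnRemove>
    void iterate_discovered_list(DiscoveredList&    refs_list,
                                 OopClosure*        keep_alive,
                                 BoolObjectClosure* is_alive,
                                 ShouldRemove&      should_remove,
                                 OnRemove&          on_remove,
                                 VoidClosure*       complete_gc) {
      DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
      while (iter.has_next()) {
        iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
        if (should_remove(iter)) {
          iter.remove();          // unlink from the discovered list
          on_remove(iter);        // e.g. iter.make_active() and/or
                                  // iter.make_referent_alive()
          iter.move_to_next();
        } else {
          iter.next();
        }
      }
      complete_gc->do_void();     // close the newly-marked set
    }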
579 569
580 570 // (SoftReferences only) Traverse the list and remove any SoftReferences whose
581 571 // referents are not alive, but that should be kept alive for policy reasons.
582 572 // Keep alive the transitive closure of all such referents.
583 573 void
584 574 ReferenceProcessor::process_phase1(DiscoveredList& refs_list,
585 575 ReferencePolicy* policy,
586 576 BoolObjectClosure* is_alive,
587 577 OopClosure* keep_alive,
588 578 VoidClosure* complete_gc) {
589 579 assert(policy != NULL, "Must have a non-NULL policy");
590 580 DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
591 581 // Decide which softly reachable refs should be kept alive.
592 582 while (iter.has_next()) {
593 583 iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
594 584 bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive();
595 585 if (referent_is_dead && !policy->should_clear_reference(iter.obj())) {
596 586 if (TraceReferenceGC) {
597 587 gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s" ") by policy",
598 588 iter.obj(), iter.obj()->blueprint()->internal_name());
599 589 }
600 590 // Remove Reference object from list
601 591 iter.remove();
602 592 // Make the Reference object active again
603 593 iter.make_active();
604 594 // keep the referent around
605 595 iter.make_referent_alive();
606 596 iter.move_to_next();
607 597 } else {
608 598 iter.next();
609 599 }
610 600 }
611 601 // Close the reachable set
612 602 complete_gc->do_void();
613 603 NOT_PRODUCT(
614 604 if (PrintGCDetails && TraceReferenceGC) {
615 605 gclog_or_tty->print_cr(" Dropped %d dead Refs out of %d "
616 606 "discovered Refs by policy list " INTPTR_FORMAT,
617 607 iter.removed(), iter.processed(), (address)refs_list.head());
618 608 }
619 609 )
620 610 }
621 611
622 612 // Traverse the list and remove any Refs that are not active, or
623 613 // whose referents are either alive or NULL.
624 614 void
625 615 ReferenceProcessor::pp2_work(DiscoveredList& refs_list,
626 616 BoolObjectClosure* is_alive,
627 617 OopClosure* keep_alive) {
628 618 assert(discovery_is_atomic(), "Error");
629 619 DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
630 620 while (iter.has_next()) {
631 621 iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
632 622 DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
633 623 assert(next == NULL, "Should not discover inactive Reference");
634 624 if (iter.is_referent_alive()) {
635 625 if (TraceReferenceGC) {
636 626 gclog_or_tty->print_cr("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",
637 627 iter.obj(), iter.obj()->blueprint()->internal_name());
638 628 }
639 629 // The referent is reachable after all.
640 630 // Remove Reference object from list.
641 631 iter.remove();
642 632 // Update the referent pointer as necessary: Note that this
643 633 // should not entail any recursive marking because the
644 634 // referent must already have been traversed.
645 635 iter.make_referent_alive();
646 636 iter.move_to_next();
647 637 } else {
648 638 iter.next();
649 639 }
650 640 }
651 641 NOT_PRODUCT(
652 642 if (PrintGCDetails && TraceReferenceGC && (iter.processed() > 0)) {
653 643 gclog_or_tty->print_cr(" Dropped %d active Refs out of %d "
654 644 "Refs in discovered list " INTPTR_FORMAT,
655 645 iter.removed(), iter.processed(), (address)refs_list.head());
656 646 }
657 647 )
658 648 }
659 649
660 650 void
661 651 ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList& refs_list,
662 652 BoolObjectClosure* is_alive,
663 653 OopClosure* keep_alive,
664 654 VoidClosure* complete_gc) {
665 655 assert(!discovery_is_atomic(), "Error");
666 656 DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
667 657 while (iter.has_next()) {
668 658 iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
669 659 HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
670 660 oop next = java_lang_ref_Reference::next(iter.obj());
671 661 if ((iter.referent() == NULL || iter.is_referent_alive() ||
672 662 next != NULL)) {
673 663 assert(next->is_oop_or_null(), "bad next field");
674 664 // Remove Reference object from list
675 665 iter.remove();
676 666 // Trace the cohorts
677 667 iter.make_referent_alive();
678 668 if (UseCompressedOops) {
679 669 keep_alive->do_oop((narrowOop*)next_addr);
680 670 } else {
681 671 keep_alive->do_oop((oop*)next_addr);
682 672 }
683 673 iter.move_to_next();
684 674 } else {
685 675 iter.next();
686 676 }
687 677 }
688 678 // Now close the newly reachable set
689 679 complete_gc->do_void();
690 680 NOT_PRODUCT(
691 681 if (PrintGCDetails && TraceReferenceGC && (iter.processed() > 0)) {
692 682 gclog_or_tty->print_cr(" Dropped %d active Refs out of %d "
693 683 "Refs in discovered list " INTPTR_FORMAT,
694 684 iter.removed(), iter.processed(), (address)refs_list.head());
695 685 }
696 686 )
697 687 }
698 688
699 689 // Traverse the list and process the referents, by either
700 690 // clearing them or keeping them (and their reachable
701 691 // closure) alive.
702 692 void
703 693 ReferenceProcessor::process_phase3(DiscoveredList& refs_list,
704 694 bool clear_referent,
705 695 BoolObjectClosure* is_alive,
706 696 OopClosure* keep_alive,
707 697 VoidClosure* complete_gc) {
708 698 ResourceMark rm;
709 699 DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
710 700 while (iter.has_next()) {
711 701 iter.update_discovered();
712 702 iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
713 703 if (clear_referent) {
714 704 // NULL out referent pointer
715 705 iter.clear_referent();
716 706 } else {
717 707 // keep the referent around
718 708 iter.make_referent_alive();
719 709 }
720 710 if (TraceReferenceGC) {
721 711 gclog_or_tty->print_cr("Adding %sreference (" INTPTR_FORMAT ": %s) as pending",
722 712 clear_referent ? "cleared " : "",
723 713 iter.obj(), iter.obj()->blueprint()->internal_name());
724 714 }
725 715 assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference");
726 716 iter.next();
727 717 }
728 718 // Remember to keep sentinel pointer around
729 719 iter.update_discovered();
730 720 // Close the reachable set
731 721 complete_gc->do_void();
732 722 }
733 723
734 724 void
735 725 ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& refs_list) {
736 726 oop obj = refs_list.head();
737 727 while (obj != sentinel_ref()) {
738 728 oop discovered = java_lang_ref_Reference::discovered(obj);
739 729 java_lang_ref_Reference::set_discovered_raw(obj, NULL);
740 730 obj = discovered;
741 731 }
742 732 refs_list.set_head(sentinel_ref());
743 733 refs_list.set_length(0);
744 734 }
745 735
746 736 void ReferenceProcessor::abandon_partial_discovery() {
747 737 // loop over the lists
748 - for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
738 + for (int i = 0; i < _max_num_q * subclasses_of_ref(); i++) {
749 739 if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
750 - gclog_or_tty->print_cr("\nAbandoning %s discovered list",
751 - list_name(i));
740 + gclog_or_tty->print_cr("\nAbandoning %s discovered list", list_name(i));
752 741 }
753 742 abandon_partial_discovered_list(_discoveredSoftRefs[i]);
754 743 }
755 744 }
756 745
757 746 class RefProcPhase1Task: public AbstractRefProcTaskExecutor::ProcessTask {
758 747 public:
759 748 RefProcPhase1Task(ReferenceProcessor& ref_processor,
760 749 DiscoveredList refs_lists[],
761 750 ReferencePolicy* policy,
762 751 bool marks_oops_alive)
763 752 : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
764 753 _policy(policy)
765 754 { }
766 755 virtual void work(unsigned int i, BoolObjectClosure& is_alive,
767 756 OopClosure& keep_alive,
768 757 VoidClosure& complete_gc)
769 758 {
770 759 Thread* thr = Thread::current();
771 760 int refs_list_index = ((WorkerThread*)thr)->id();
772 761 _ref_processor.process_phase1(_refs_lists[refs_list_index], _policy,
773 762 &is_alive, &keep_alive, &complete_gc);
774 763 }
775 764 private:
776 765 ReferencePolicy* _policy;
777 766 };
778 767
779 768 class RefProcPhase2Task: public AbstractRefProcTaskExecutor::ProcessTask {
780 769 public:
781 770 RefProcPhase2Task(ReferenceProcessor& ref_processor,
782 771 DiscoveredList refs_lists[],
783 772 bool marks_oops_alive)
784 773 : ProcessTask(ref_processor, refs_lists, marks_oops_alive)
785 774 { }
786 775 virtual void work(unsigned int i, BoolObjectClosure& is_alive,
787 776 OopClosure& keep_alive,
788 777 VoidClosure& complete_gc)
789 778 {
790 779 _ref_processor.process_phase2(_refs_lists[i],
791 780 &is_alive, &keep_alive, &complete_gc);
792 781 }
793 782 };
794 783
795 784 class RefProcPhase3Task: public AbstractRefProcTaskExecutor::ProcessTask {
796 785 public:
797 786 RefProcPhase3Task(ReferenceProcessor& ref_processor,
798 787 DiscoveredList refs_lists[],
799 788 bool clear_referent,
800 789 bool marks_oops_alive)
801 790 : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
802 791 _clear_referent(clear_referent)
803 792 { }
804 793 virtual void work(unsigned int i, BoolObjectClosure& is_alive,
805 794 OopClosure& keep_alive,
806 795 VoidClosure& complete_gc)
807 796 {
808 797 // Don't use "refs_list_index" calculated in this way because
809 798 // balance_queues() has moved the Ref's into the first n queues.
810 799 // Thread* thr = Thread::current();
811 800 // int refs_list_index = ((WorkerThread*)thr)->id();
812 801 // _ref_processor.process_phase3(_refs_lists[refs_list_index], _clear_referent,
813 802 _ref_processor.process_phase3(_refs_lists[i], _clear_referent,
814 803 &is_alive, &keep_alive, &complete_gc);
815 804 }
816 805 private:
817 806 bool _clear_referent;
818 807 };
819 808
820 809 // Balances reference queues.
821 810 // Move entries from all queues[0, 1, ..., _max_num_q-1] to
822 811 // queues[0, 1, ..., _num_q-1] because only the first _num_q
823 812 // corresponding to the active workers will be processed.
824 813 void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[])
825 814 {
826 815 // calculate total length
827 816 size_t total_refs = 0;
828 817 if (TraceReferenceGC && PrintGCDetails) {
829 818 gclog_or_tty->print_cr("\nBalance ref_lists ");
830 819 }
831 820
832 821 for (int i = 0; i < _max_num_q; ++i) {
833 822 total_refs += ref_lists[i].length();
834 823 if (TraceReferenceGC && PrintGCDetails) {
835 824 gclog_or_tty->print("%d ", ref_lists[i].length());
836 825 }
837 826 }
838 827 if (TraceReferenceGC && PrintGCDetails) {
839 828 gclog_or_tty->print_cr(" = %d", total_refs);
840 829 }
841 830 size_t avg_refs = total_refs / _num_q + 1;
842 831 int to_idx = 0;
843 832 for (int from_idx = 0; from_idx < _max_num_q; from_idx++) {
844 833 bool move_all = false;
845 834 if (from_idx >= _num_q) {
846 835 move_all = ref_lists[from_idx].length() > 0;
847 836 }
848 837 while ((ref_lists[from_idx].length() > avg_refs) ||
849 838 move_all) {
850 839 assert(to_idx < _num_q, "Sanity Check!");
851 840 if (ref_lists[to_idx].length() < avg_refs) {
852 841 // move superfluous refs
853 842 size_t refs_to_move;
854 843 // Move all the Ref's if the from queue will not be processed.
855 844 if (move_all) {
856 845 refs_to_move = MIN2(ref_lists[from_idx].length(),
857 846 avg_refs - ref_lists[to_idx].length());
858 847 } else {
859 848 refs_to_move = MIN2(ref_lists[from_idx].length() - avg_refs,
860 849 avg_refs - ref_lists[to_idx].length());
861 850 }
862 851 oop move_head = ref_lists[from_idx].head();
863 852 oop move_tail = move_head;
864 853 oop new_head = move_head;
865 854 // find an element to split the list on
866 855 for (size_t j = 0; j < refs_to_move; ++j) {
867 856 move_tail = new_head;
868 857 new_head = java_lang_ref_Reference::discovered(new_head);
869 858 }
870 - java_lang_ref_Reference::set_discovered(move_tail, ref_lists[to_idx].head());
859 +
860 + if (_discovered_list_needs_barrier) {
861 + java_lang_ref_Reference::set_discovered(move_tail, ref_lists[to_idx].head());
862 + } else {
863 + HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(move_tail);
864 + oop_store_raw(discovered_addr, ref_lists[to_idx].head());
865 + }
866 +
871 867 ref_lists[to_idx].set_head(move_head);
872 868 ref_lists[to_idx].inc_length(refs_to_move);
873 869 ref_lists[from_idx].set_head(new_head);
874 870 ref_lists[from_idx].dec_length(refs_to_move);
875 871 if (ref_lists[from_idx].length() == 0) {
876 872 break;
877 873 }
878 874 } else {
879 875 to_idx = (to_idx + 1) % _num_q;
880 876 }
881 877 }
882 878 }
883 879 #ifdef ASSERT
884 880 size_t balanced_total_refs = 0;
885 881 for (int i = 0; i < _max_num_q; ++i) {
886 882 balanced_total_refs += ref_lists[i].length();
887 883 if (TraceReferenceGC && PrintGCDetails) {
888 884 gclog_or_tty->print("%d ", ref_lists[i].length());
889 885 }
890 886 }
891 887 if (TraceReferenceGC && PrintGCDetails) {
892 888 gclog_or_tty->print_cr(" = %d", balanced_total_refs);
893 889 gclog_or_tty->flush();
894 890 }
895 891 assert(total_refs == balanced_total_refs, "Balancing was incomplete");
896 892 #endif
897 893 }
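
A worked example of the balancing arithmetic: with _num_q == 2, _max_num_q == 4 and per-queue lengths {5, 3, 4, 0}, total_refs is 12 and avg_refs = 12/2 + 1 = 7. Queues 0 and 1 are already below 7; queue 2 (index >= _num_q, so move_all) first donates MIN2(4, 7-5) = 2 refs to queue 0 and then MIN2(2, 7-3) = 2 refs to queue 1, leaving {7, 5, 0, 0}: everything in the first _num_q queues, as the processing phases require.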
898 894
899 895 void ReferenceProcessor::balance_all_queues() {
900 896 balance_queues(_discoveredSoftRefs);
901 897 balance_queues(_discoveredWeakRefs);
902 898 balance_queues(_discoveredFinalRefs);
903 899 balance_queues(_discoveredPhantomRefs);
904 900 }
905 901
906 902 void
907 903 ReferenceProcessor::process_discovered_reflist(
908 904 DiscoveredList refs_lists[],
909 905 ReferencePolicy* policy,
910 906 bool clear_referent,
911 907 BoolObjectClosure* is_alive,
912 908 OopClosure* keep_alive,
913 909 VoidClosure* complete_gc,
914 910 AbstractRefProcTaskExecutor* task_executor)
915 911 {
916 912 bool mt_processing = task_executor != NULL && _processing_is_mt;
917 913 // If discovery used MT and a dynamic number of GC threads, then
918 914 // the queues must be balanced for correctness if fewer than the
919 915 // maximum number of queues were used. The number of queue used
920 916 // during discovery may be different than the number to be used
921 917 // for processing so don't depend of _num_q < _max_num_q as part
922 918 // of the test.
923 919 bool must_balance = _discovery_is_mt;
924 920
925 921 if ((mt_processing && ParallelRefProcBalancingEnabled) ||
926 922 must_balance) {
927 923 balance_queues(refs_lists);
928 924 }
929 925 if (PrintReferenceGC && PrintGCDetails) {
930 926 size_t total = 0;
931 927 for (int i = 0; i < _max_num_q; ++i) {
932 928 total += refs_lists[i].length();
933 929 }
934 930 gclog_or_tty->print(", %u refs", total);
935 931 }
936 932
937 933 // Phase 1 (soft refs only):
938 934 // . Traverse the list and remove any SoftReferences whose
939 935 // referents are not alive, but that should be kept alive for
940 936 // policy reasons. Keep alive the transitive closure of all
941 937 // such referents.
942 938 if (policy != NULL) {
943 939 if (mt_processing) {
944 940 RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/);
945 941 task_executor->execute(phase1);
946 942 } else {
947 943 for (int i = 0; i < _max_num_q; i++) {
948 944 process_phase1(refs_lists[i], policy,
949 945 is_alive, keep_alive, complete_gc);
950 946 }
951 947 }
952 948 } else { // policy == NULL
953 949 assert(refs_lists != _discoveredSoftRefs,
954 950 "Policy must be specified for soft references.");
955 951 }
956 952
957 953 // Phase 2:
958 954 // . Traverse the list and remove any refs whose referents are alive.
959 955 if (mt_processing) {
960 956 RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/);
961 957 task_executor->execute(phase2);
962 958 } else {
963 959 for (int i = 0; i < _max_num_q; i++) {
964 960 process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc);
965 961 }
966 962 }
967 963
968 964 // Phase 3:
969 965 // . Traverse the list and process referents as appropriate.
970 966 if (mt_processing) {
971 967 RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/);
972 968 task_executor->execute(phase3);
973 969 } else {
974 970 for (int i = 0; i < _max_num_q; i++) {
975 971 process_phase3(refs_lists[i], clear_referent,
976 972 is_alive, keep_alive, complete_gc);
977 973 }
978 974 }
979 975 }
980 976
981 977 void ReferenceProcessor::clean_up_discovered_references() {
982 978 // loop over the lists
983 979 // Should this instead be
984 - // for (int i = 0; i < subclasses_of_ref; i++_ {
980 + // for (int i = 0; i < subclasses_of_ref(); i++) {
985 981 // for (int j = 0; j < _num_q; j++) {
986 982 // int index = i * _max_num_q + j;
987 - for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
983 + for (int i = 0; i < _max_num_q * subclasses_of_ref(); i++) {
988 984 if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
989 985 gclog_or_tty->print_cr(
990 986 "\nScrubbing %s discovered list of Null referents",
991 987 list_name(i));
992 988 }
993 989 clean_up_discovered_reflist(_discoveredSoftRefs[i]);
994 990 }
995 991 }
996 992
997 993 void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) {
998 994 assert(!discovery_is_atomic(), "Else why call this method?");
999 995 DiscoveredListIterator iter(refs_list, NULL, NULL);
1000 996 while (iter.has_next()) {
1001 997 iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
1002 998 oop next = java_lang_ref_Reference::next(iter.obj());
1003 999 assert(next->is_oop_or_null(), "bad next field");
1004 1000 // If referent has been cleared or Reference is not active,
1005 1001 // drop it.
1006 1002 if (iter.referent() == NULL || next != NULL) {
1007 1003 debug_only(
1008 1004 if (PrintGCDetails && TraceReferenceGC) {
1009 1005 gclog_or_tty->print_cr("clean_up_discovered_list: Dropping Reference: "
1010 1006 INTPTR_FORMAT " with next field: " INTPTR_FORMAT
1011 1007 " and referent: " INTPTR_FORMAT,
1012 1008 iter.obj(), next, iter.referent());
1013 1009 }
1014 1010 )
1015 1011 // Remove Reference object from list
1016 1012 iter.remove();
1017 1013 iter.move_to_next();
1018 1014 } else {
1019 1015 iter.next();
1020 1016 }
1021 1017 }
1022 1018 NOT_PRODUCT(
1023 1019 if (PrintGCDetails && TraceReferenceGC) {
1024 1020 gclog_or_tty->print(
1025 1021 " Removed %d Refs with NULL referents out of %d discovered Refs",
1026 1022 iter.removed(), iter.processed());
1027 1023 }
1028 1024 )
1029 1025 }
1030 1026
1031 1027 inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) {
1032 1028 int id = 0;
1033 1029 // Determine the queue index to use for this object.
1034 1030 if (_discovery_is_mt) {
1035 1031 // During a multi-threaded discovery phase,
1036 1032 // each thread saves to its "own" list.
1037 1033 Thread* thr = Thread::current();
1038 1034 id = thr->as_Worker_thread()->id();
1039 1035 } else {
1040 1036 // single-threaded discovery, we save in round-robin
1041 1037 // fashion to each of the lists.
1042 1038 if (_processing_is_mt) {
1043 1039 id = next_id();
1044 1040 }
1045 1041 }
1046 1042 assert(0 <= id && id < _max_num_q, "Id is out-of-bounds (call Freud?)");
1047 1043
1048 1044 // Get the discovered queue to which we will add
1049 1045 DiscoveredList* list = NULL;
1050 1046 switch (rt) {
1051 1047 case REF_OTHER:
1052 1048 // Unknown reference type, no special treatment
1053 1049 break;
1054 1050 case REF_SOFT:
1055 1051 list = &_discoveredSoftRefs[id];
1056 1052 break;
1057 1053 case REF_WEAK:
1058 1054 list = &_discoveredWeakRefs[id];
1059 1055 break;
1060 1056 case REF_FINAL:
1061 1057 list = &_discoveredFinalRefs[id];
1062 1058 break;
1063 1059 case REF_PHANTOM:
1064 1060 list = &_discoveredPhantomRefs[id];
1065 1061 break;
1066 1062 case REF_NONE:
1067 1063 // we should not reach here if we are an instanceRefKlass
1068 1064 default:
1069 1065 ShouldNotReachHere();
1070 1066 }
1071 1067 if (TraceReferenceGC && PrintGCDetails) {
1072 1068 gclog_or_tty->print_cr("Thread %d gets list " INTPTR_FORMAT, id, list);
1073 1069 }
1074 1070 return list;
1075 1071 }
1076 1072
1077 1073 inline void
1078 1074 ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
1079 1075 oop obj,
1080 1076 HeapWord* discovered_addr) {
1081 1077 assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
1082 1078 // First we must make sure this object is only enqueued once. CAS in a non null
1083 1079 // discovered_addr.
1084 1080 oop current_head = refs_list.head();
1085 1081
1086 1082 // Note: In the case of G1, this specific pre-barrier is strictly
1087 1083 // not necessary because the only case we are interested in
1088 1084 // here is when *discovered_addr is NULL (see the CAS further below),
1089 1085 // so this will expand to nothing. As a result, we have manually
1090 1086 // elided this out for G1, but left in the test for some future
1091 1087 // collector that might have need for a pre-barrier here.
1092 1088 if (_discovered_list_needs_barrier && !UseG1GC) {
1093 1089 if (UseCompressedOops) {
1094 1090 _bs->write_ref_field_pre((narrowOop*)discovered_addr, current_head);
1095 1091 } else {
1096 1092 _bs->write_ref_field_pre((oop*)discovered_addr, current_head);
1097 1093 }
1098 1094 guarantee(false, "Need to check non-G1 collector");
1099 1095 }
1100 1096 oop retest = oopDesc::atomic_compare_exchange_oop(current_head, discovered_addr,
1101 1097 NULL);
1102 1098 if (retest == NULL) {
1103 1099 // This thread just won the right to enqueue the object.
1104 1100 // We have separate lists for enqueueing so no synchronization
1105 1101 // is necessary.
1106 1102 refs_list.set_head(obj);
1107 1103 refs_list.inc_length(1);
1108 1104 if (_discovered_list_needs_barrier) {
1109 1105 _bs->write_ref_field((void*)discovered_addr, current_head);
1110 1106 }
1111 1107
1112 1108 if (TraceReferenceGC) {
1113 1109 gclog_or_tty->print_cr("Enqueued reference (mt) (" INTPTR_FORMAT ": %s)",
1114 1110 obj, obj->blueprint()->internal_name());
1115 1111 }
1116 1112 } else {
1117 1113 // If retest was non NULL, another thread beat us to it:
1118 1114 // The reference has already been discovered...
1119 1115 if (TraceReferenceGC) {
1120 1116 gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)",
1121 1117 obj, obj->blueprint()->internal_name());
1122 1118 }
1123 1119 }
1124 1120 }
1125 1121
1126 1122 #ifndef PRODUCT
1127 1123 // Non-atomic (i.e. concurrent) discovery might allow us
1128 1124 // to observe j.l.References with NULL referents, being those
1129 1125 // cleared concurrently by mutators during (or after) discovery.
1130 1126 void ReferenceProcessor::verify_referent(oop obj) {
1131 1127 bool da = discovery_is_atomic();
1132 1128 oop referent = java_lang_ref_Reference::referent(obj);
1133 1129 assert(da ? referent->is_oop() : referent->is_oop_or_null(),
1134 1130 err_msg("Bad referent " INTPTR_FORMAT " found in Reference "
1135 1131 INTPTR_FORMAT " during %satomic discovery ",
1136 1132 (intptr_t)referent, (intptr_t)obj, da ? "" : "non-"));
1137 1133 }
1138 1134 #endif
1139 1135
1140 1136 // We mention two of several possible choices here:
1141 1137 // #0: if the reference object is not in the "originating generation"
1142 1138 // (or part of the heap being collected, indicated by our "span"
1143 1139 // we don't treat it specially (i.e. we scan it as we would
1144 1140 // a normal oop, treating its references as strong references).
1145 1141 // This means that references can't be enqueued unless their
1146 1142 // referent is also in the same span. This is the simplest,
1147 1143 // most "local" and most conservative approach, albeit one
1148 1144 // that may cause weak references to be enqueued least promptly.
1149 1145 // We call this choice the "ReferenceBasedDiscovery" policy.
1150 1146 // #1: the reference object may be in any generation (span), but if
1151 1147 // the referent is in the generation (span) being currently collected
1152 1148 // then we can discover the reference object, provided
1153 1149 // the object has not already been discovered by
1154 1150 // a different concurrently running collector (as may be the
1155 1151 // case, for instance, if the reference object is in CMS and
1156 1152 // the referent in DefNewGeneration), and provided the processing
1157 1153 // of this reference object by the current collector will
1158 1154 // appear atomic to every other collector in the system.
1159 1155 // (Thus, for instance, a concurrent collector may not
1160 1156 // discover references in other generations even if the
1161 1157 // referent is in its own generation). This policy may,
1162 1158 // in certain cases, enqueue references somewhat sooner than
1163 1159 // might Policy #0 above, but at marginally increased cost
1164 1160 // and complexity in processing these references.
1165 1161 // We call this choice the "ReferentBasedDiscovery" policy.
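
As a concrete example of the two policies: during a young collection under ReferenceBasedDiscovery, a Reference object living outside _span (say, in the old generation) is not discovered at all; it is scanned like any other object and its referent is treated as strongly reachable. Under ReferentBasedDiscovery the same Reference would be discovered provided its referent lies inside _span (subject, for a concurrent collector, to the atomicity proviso above).
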
1166 1162 bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
1167 1163 // We enqueue references only if we are discovering refs
1168 1164 // (rather than processing discovered refs).
1169 1165 if (!_discovering_refs || !RegisterReferences) {
1170 1166 return false;
1171 1167 }
1172 1168 // We only enqueue active references.
1173 1169 oop next = java_lang_ref_Reference::next(obj);
1174 1170 if (next != NULL) {
1175 1171 return false;
1176 1172 }
1177 1173
1178 1174 HeapWord* obj_addr = (HeapWord*)obj;
1179 1175 if (RefDiscoveryPolicy == ReferenceBasedDiscovery &&
1180 1176 !_span.contains(obj_addr)) {
1181 1177 // Reference is not in the originating generation;
1182 1178 // don't treat it specially (i.e. we want to scan it as a normal
1183 1179 // object with strong references).
1184 1180 return false;
1185 1181 }
1186 1182
1187 1183 // We only enqueue references whose referents are not (yet) strongly
1188 1184 // reachable.
1189 1185 if (is_alive_non_header() != NULL) {
1190 1186 verify_referent(obj);
1191 1187 if (is_alive_non_header()->do_object_b(java_lang_ref_Reference::referent(obj))) {
1192 1188 return false; // referent is reachable
1193 1189 }
1194 1190 }
1195 1191 if (rt == REF_SOFT) {
1196 1192 // For soft refs we can decide now if these are not
1197 1193 // current candidates for clearing, in which case we
1198 1194 // can mark through them now, rather than delaying that
1199 1195 // to the reference-processing phase. Since all current
1200 1196 // time-stamp policies advance the soft-ref clock only
1201 1197 // at a major collection cycle, this is always currently
1202 1198 // accurate.
1203 1199 if (!_current_soft_ref_policy->should_clear_reference(obj)) {
1204 1200 return false;
1205 1201 }
1206 1202 }
1207 1203
1204 + ResourceMark rm; // Needed for tracing.
1205 +
1208 1206 HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
1209 1207 const oop discovered = java_lang_ref_Reference::discovered(obj);
1210 1208 assert(discovered->is_oop_or_null(), "bad discovered field");
1211 1209 if (discovered != NULL) {
1212 1210 // The reference has already been discovered...
1213 1211 if (TraceReferenceGC) {
1214 1212 gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)",
1215 1213 obj, obj->blueprint()->internal_name());
1216 1214 }
1217 1215 if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
1218 1216 // assumes that an object is not processed twice;
1219 1217 // if it's been already discovered it must be on another
1220 1218 // generation's discovered list; so we won't discover it.
1221 1219 return false;
1222 1220 } else {
1223 1221 assert(RefDiscoveryPolicy == ReferenceBasedDiscovery,
1224 1222 "Unrecognized policy");
1225 1223 // Check assumption that an object is not potentially
1226 1224 // discovered twice except by concurrent collectors that potentially
1227 1225 // trace the same Reference object twice.
1228 1226 assert(UseConcMarkSweepGC || UseG1GC,
1229 1227 "Only possible with a concurrent marking collector");
1230 1228 return true;
1231 1229 }
1232 1230 }
1233 1231
1234 1232 if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
1235 1233 verify_referent(obj);
1236 1234 // enqueue if and only if either:
1237 1235 // reference is in our span or
1238 1236 // we are an atomic collector and referent is in our span
1239 1237 if (_span.contains(obj_addr) ||
1240 1238 (discovery_is_atomic() &&
1241 1239 _span.contains(java_lang_ref_Reference::referent(obj)))) {
1242 1240 // should_enqueue = true;
1243 1241 } else {
1244 1242 return false;
1245 1243 }
1246 1244 } else {
1247 1245 assert(RefDiscoveryPolicy == ReferenceBasedDiscovery &&
1248 1246 _span.contains(obj_addr), "code inconsistency");
1249 1247 }
1250 1248
1251 1249 // Get the right type of discovered queue head.
1252 1250 DiscoveredList* list = get_discovered_list(rt);
1253 1251 if (list == NULL) {
1254 1252 return false; // nothing special needs to be done
1255 1253 }
1256 1254
1257 1255 if (_discovery_is_mt) {
1258 1256 add_to_discovered_list_mt(*list, obj, discovered_addr);
1259 1257 } else {
1260 1258 // If "_discovered_list_needs_barrier", we do write barriers when
1261 1259 // updating the discovered reference list. Otherwise, we do a raw store
1262 1260 // here: the field will be visited later when processing the discovered
1263 1261 // references.
1264 1262 oop current_head = list->head();
1265 1263 // As in the case further above, since we are over-writing a NULL
1266 1264 // pre-value, we can safely elide the pre-barrier here for the case of G1.
1267 1265 assert(discovered == NULL, "control point invariant");
1268 1266 if (_discovered_list_needs_barrier && !UseG1GC) { // safe to elide for G1
1269 1267 if (UseCompressedOops) {
1270 1268 _bs->write_ref_field_pre((narrowOop*)discovered_addr, current_head);
1271 1269 } else {
1272 1270 _bs->write_ref_field_pre((oop*)discovered_addr, current_head);
1273 1271 }
1274 1272 guarantee(false, "Need to check non-G1 collector");
1275 1273 }
1276 1274 oop_store_raw(discovered_addr, current_head);
1277 1275 if (_discovered_list_needs_barrier) {
1278 1276 _bs->write_ref_field((void*)discovered_addr, current_head);
1279 1277 }
1280 1278 list->set_head(obj);
1281 1279 list->inc_length(1);
1282 1280
1283 1281 if (TraceReferenceGC) {
1284 1282 gclog_or_tty->print_cr("Enqueued reference (" INTPTR_FORMAT ": %s)",
1285 1283 obj, obj->blueprint()->internal_name());
1286 1284 }
1287 1285 }
1288 1286 assert(obj->is_oop(), "Enqueued a bad reference");
1289 1287 verify_referent(obj);
1290 1288 return true;
1291 1289 }
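
To summarize the filtering order above: discovery must be enabled; the Reference must still be active (next == NULL); under ReferenceBasedDiscovery the Reference itself must lie in _span; the referent must not already be known strongly reachable; a SoftReference must additionally pass the clock policy; an already-discovered Reference is not re-queued (returning false under ReferentBasedDiscovery, true under ReferenceBasedDiscovery, where only a concurrent collector can trace the same Reference twice); under ReferentBasedDiscovery the referent-span check then applies; only after all that is the Reference installed on the per-type queue, via CAS when discovery is MT or a raw/barriered store otherwise.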
1292 1290
1293 1291 // Preclean the discovered references by removing those
1294 1292 // whose referents are alive, and by marking from those that
1295 1293 // are not active. These lists can be handled here
1296 1294 // in any order and, indeed, concurrently.
1297 1295 void ReferenceProcessor::preclean_discovered_references(
1298 1296 BoolObjectClosure* is_alive,
1299 1297 OopClosure* keep_alive,
1300 1298 VoidClosure* complete_gc,
1301 1299 YieldClosure* yield,
1302 1300 bool should_unload_classes) {
1303 1301
1304 1302 NOT_PRODUCT(verify_ok_to_handle_reflists());
1305 1303
1306 1304 #ifdef ASSERT
1307 1305 bool must_remember_klasses = ClassUnloading && !UseConcMarkSweepGC ||
1308 1306 CMSClassUnloadingEnabled && UseConcMarkSweepGC ||
1309 1307 ExplicitGCInvokesConcurrentAndUnloadsClasses &&
1310 1308 UseConcMarkSweepGC && should_unload_classes;
1311 1309 RememberKlassesChecker mx(must_remember_klasses);
1312 1310 #endif
1313 1311 // Soft references
1314 1312 {
1315 1313 TraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
1316 1314 false, gclog_or_tty);
1317 1315 for (int i = 0; i < _max_num_q; i++) {
1318 1316 if (yield->should_return()) {
1319 1317 return;
1320 1318 }
1321 1319 preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive,
1322 1320 keep_alive, complete_gc, yield);
1323 1321 }
1324 1322 }
1325 1323
1326 1324 // Weak references
1327 1325 {
1328 1326 TraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
1329 1327 false, gclog_or_tty);
1330 1328 for (int i = 0; i < _max_num_q; i++) {
1331 1329 if (yield->should_return()) {
1332 1330 return;
1333 1331 }
1334 1332 preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive,
1335 1333 keep_alive, complete_gc, yield);
1336 1334 }
1337 1335 }
1338 1336
1339 1337 // Final references
1340 1338 {
1341 1339 TraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
1342 1340 false, gclog_or_tty);
1343 1341 for (int i = 0; i < _max_num_q; i++) {
1344 1342 if (yield->should_return()) {
1345 1343 return;
1346 1344 }
1347 1345 preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive,
1348 1346 keep_alive, complete_gc, yield);
1349 1347 }
1350 1348 }
1351 1349
1352 1350 // Phantom references
1353 1351 {
1354 1352 TraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
1355 1353 false, gclog_or_tty);
1356 1354 for (int i = 0; i < _max_num_q; i++) {
1357 1355 if (yield->should_return()) {
1358 1356 return;
1359 1357 }
1360 1358 preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive,
1361 1359 keep_alive, complete_gc, yield);
1362 1360 }
1363 1361 }
1364 1362 }
1365 1363
1366 1364 // Walk the given discovered ref list, and remove all reference objects
1367 1365 // whose referents are still alive, whose referents are NULL or which
1368 1366 // are not active (have a non-NULL next field). NOTE: When we are
1369 1367 // thus precleaning the ref lists (which happens single-threaded today),
1370 1368 // we do not disable refs discovery to honour the correct semantics of
1371 1369 // java.lang.Reference. As a result, we need to be careful below
1372 1370 // that ref removal steps interleave safely with ref discovery steps
1373 1371 // (in this thread).
1374 1372 void
1375 1373 ReferenceProcessor::preclean_discovered_reflist(DiscoveredList& refs_list,
1376 1374 BoolObjectClosure* is_alive,
1377 1375 OopClosure* keep_alive,
1378 1376 VoidClosure* complete_gc,
1379 1377 YieldClosure* yield) {
1380 1378 DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
1381 1379 while (iter.has_next()) {
1382 1380 iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
1383 1381 oop obj = iter.obj();
1384 1382 oop next = java_lang_ref_Reference::next(obj);
1385 1383 if (iter.referent() == NULL || iter.is_referent_alive() ||
1386 1384 next != NULL) {
1387 1385 // The referent has been cleared, or is alive, or the Reference is not
1388 1386 // active; we need to trace and mark its cohort.
1389 1387 if (TraceReferenceGC) {
1390 1388 gclog_or_tty->print_cr("Precleaning Reference (" INTPTR_FORMAT ": %s)",
1391 1389 iter.obj(), iter.obj()->blueprint()->internal_name());
1392 1390 }
1393 1391 // Remove Reference object from list
1394 1392 iter.remove();
1395 1393 // Keep alive its cohort.
1396 1394 iter.make_referent_alive();
1397 1395 if (UseCompressedOops) {
1398 1396 narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj);
1399 1397 keep_alive->do_oop(next_addr);
1400 1398 } else {
1401 1399 oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj);
1402 1400 keep_alive->do_oop(next_addr);
1403 1401 }
1404 1402 iter.move_to_next();
1405 1403 } else {
1406 1404 iter.next();
1407 1405 }
1408 1406 }
1409 1407 // Close the reachable set
1410 1408 complete_gc->do_void();
1411 1409
1412 1410 NOT_PRODUCT(
1413 1411 if (PrintGCDetails && PrintReferenceGC && (iter.processed() > 0)) {
1414 1412 gclog_or_tty->print_cr(" Dropped %d Refs out of %d "
1415 1413 "Refs in discovered list " INTPTR_FORMAT,
1416 1414 iter.removed(), iter.processed(), (address)refs_list.head());
1417 1415 }
1418 1416 )
1419 1417 }
1420 1418
1421 1419 const char* ReferenceProcessor::list_name(int i) {
1422 - assert(i >= 0 && i <= _max_num_q * subclasses_of_ref, "Out of bounds index");
1420 + assert(i >= 0 && i <= _max_num_q * subclasses_of_ref(), "Out of bounds index");
1423 1421 int j = i / _max_num_q;
1424 1422 switch (j) {
1425 1423 case 0: return "SoftRef";
1426 1424 case 1: return "WeakRef";
1427 1425 case 2: return "FinalRef";
1428 1426 case 3: return "PhantomRef";
1429 1427 }
1430 1428 ShouldNotReachHere();
1431 1429 return NULL;
1432 1430 }
1433 1431
1434 1432 #ifndef PRODUCT
1435 1433 void ReferenceProcessor::verify_ok_to_handle_reflists() {
1436 1434 // empty for now
1437 1435 }
1438 1436 #endif
1439 1437
1440 1438 void ReferenceProcessor::verify() {
1441 1439 guarantee(sentinel_ref() != NULL && sentinel_ref()->is_oop(), "Lost _sentinelRef");
1442 1440 }
1443 1441
1444 1442 #ifndef PRODUCT
1445 1443 void ReferenceProcessor::clear_discovered_references() {
1446 1444 guarantee(!_discovering_refs, "Discovering refs?");
1447 - for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
1445 + for (int i = 0; i < _max_num_q * subclasses_of_ref(); i++) {
1448 1446 oop obj = _discoveredSoftRefs[i].head();
1449 1447 while (obj != sentinel_ref()) {
1450 1448 oop next = java_lang_ref_Reference::discovered(obj);
1451 1449 java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
1452 1450 obj = next;
1453 1451 }
1454 1452 _discoveredSoftRefs[i].set_head(sentinel_ref());
1455 1453 _discoveredSoftRefs[i].set_length(0);
1456 1454 }
1457 1455 }
1458 1456 #endif // PRODUCT