1 /*
2 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
162
163 NOT_PRODUCT(
164 if (now < _soft_ref_timestamp_clock) {
165 log_warning(gc)("time warp: " JLONG_FORMAT " to " JLONG_FORMAT,
166 _soft_ref_timestamp_clock, now);
167 }
168 )
169 // The values of now and _soft_ref_timestamp_clock are set using
170 // javaTimeNanos(), which is guaranteed to be monotonically
171 // non-decreasing provided the underlying platform provides such
 172   // a time source (and it is bug-free).
173 // In product mode, however, protect ourselves from non-monotonicity.
174 if (now > _soft_ref_timestamp_clock) {
175 _soft_ref_timestamp_clock = now;
176 java_lang_ref_SoftReference::set_clock(now);
177 }
178 // Else leave clock stalled at its old value until time progresses
 179   // past the clock value.
180 }
181
182 size_t ReferenceProcessor::total_count(DiscoveredList lists[]) {
183 size_t total = 0;
184 for (uint i = 0; i < _max_num_q; ++i) {
185 total += lists[i].length();
186 }
187 return total;
188 }
189
190 ReferenceProcessorStats ReferenceProcessor::process_discovered_references(
191 BoolObjectClosure* is_alive,
192 OopClosure* keep_alive,
193 VoidClosure* complete_gc,
194 AbstractRefProcTaskExecutor* task_executor,
195 GCTimer* gc_timer) {
196
 197   assert(!enqueuing_is_done(), "If here, enqueuing should not be complete");
198 // Stop treating discovered references specially.
199 disable_discovery();
200
201 // If discovery was concurrent, someone could have modified
202 // the value of the static field in the j.l.r.SoftReference
203 // class that holds the soft reference timestamp clock using
204 // reflection or Unsafe between when discovery was enabled and
205 // now. Unconditionally update the static field in ReferenceProcessor
206 // here so that we use the new value during processing of the
207 // discovered soft refs.
208
209 _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock();
210
211 ReferenceProcessorStats stats(
212 total_count(_discoveredSoftRefs),
213 total_count(_discoveredWeakRefs),
214 total_count(_discoveredFinalRefs),
215 total_count(_discoveredPhantomRefs));
216
217 // Soft references
218 {
219 GCTraceTime(Debug, gc, ref) tt("SoftReference", gc_timer);
220 process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true,
221 is_alive, keep_alive, complete_gc, task_executor);
222 }
223
224 update_soft_ref_master_clock();
225
226 // Weak references
227 {
228 GCTraceTime(Debug, gc, ref) tt("WeakReference", gc_timer);
229 process_discovered_reflist(_discoveredWeakRefs, NULL, true,
230 is_alive, keep_alive, complete_gc, task_executor);
231 }
232
233 // Final references
234 {
235 GCTraceTime(Debug, gc, ref) tt("FinalReference", gc_timer);
236 process_discovered_reflist(_discoveredFinalRefs, NULL, false,
237 is_alive, keep_alive, complete_gc, task_executor);
238 }
239
240 // Phantom references
241 {
242 GCTraceTime(Debug, gc, ref) tt("PhantomReference", gc_timer);
243 process_discovered_reflist(_discoveredPhantomRefs, NULL, true,
244 is_alive, keep_alive, complete_gc, task_executor);
245 }
246
247 // Weak global JNI references. It would make more sense (semantically) to
248 // traverse these simultaneously with the regular weak references above, but
 249   // that is not what the JDK 1.2 specification requires; see bug #4126360. Native code can
250 // thus use JNI weak references to circumvent the phantom references and
251 // resurrect a "post-mortem" object.
252 {
253 GCTraceTime(Debug, gc, ref) tt("JNI Weak Reference", gc_timer);
254 if (task_executor != NULL) {
255 task_executor->set_single_threaded_mode();
256 }
257 process_phaseJNI(is_alive, keep_alive, complete_gc);
258 }
259
260 log_debug(gc, ref)("Ref Counts: Soft: " SIZE_FORMAT " Weak: " SIZE_FORMAT " Final: " SIZE_FORMAT " Phantom: " SIZE_FORMAT,
261 stats.soft_count(), stats.weak_count(), stats.final_count(), stats.phantom_count());
262 log_develop_trace(gc, ref)("JNI Weak Reference count: " SIZE_FORMAT, count_jni_refs());
263
264 return stats;
265 }
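// A typical caller runs these two steps back to back (a sketch under assumed
// closure objects, not code from this file; pass NULL for task_executor to
// run serially):
//
//   ReferenceProcessorStats stats =
//     rp->process_discovered_references(&is_alive, &keep_alive, &complete_gc,
//                                       NULL /* serial */, gc_timer);
//   rp->enqueue_discovered_references(NULL /* serial */);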
266
267 #ifndef PRODUCT
 268 // Count the JNI weak global handles.
269 size_t ReferenceProcessor::count_jni_refs() {
270 class CountHandleClosure: public OopClosure {
271 private:
272 size_t _count;
273 public:
274 CountHandleClosure(): _count(0) {}
275 void do_oop(oop* unused) { _count++; }
276 void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
277 size_t count() { return _count; }
278 };
279 CountHandleClosure global_handle_count;
280 JNIHandles::weak_oops_do(&global_handle_count);
281 return global_handle_count.count();
282 }
283 #endif
284
285 void ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive,
286 OopClosure* keep_alive,
287 VoidClosure* complete_gc) {
288 JNIHandles::weak_oops_do(is_alive, keep_alive);
289 complete_gc->do_void();
290 }
291
292 void ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) {
293 // Enqueue references that are not made active again, and
294 // clear the decks for the next collection (cycle).
295 enqueue_discovered_reflists(task_executor);
296
297 // Stop treating discovered references specially.
298 disable_discovery();
299 }
300
301 void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list) {
302 // Given a list of refs linked through the "discovered" field
303 // (java.lang.ref.Reference.discovered), self-loop their "next" field
304 // thus distinguishing them from active References, then
305 // prepend them to the pending list.
306 //
307 // The Java threads will see the Reference objects linked together through
308 // the discovered field. Instead of trying to do the write barrier updates
 309   // in all the places where the reference processor manipulates the discovered
 310   // field, we do the barrier here, where we iterate through all linked
 311   // Reference objects anyway. Note that it is important not to dirty any
312 // cards during reference processing since this will cause card table
313 // verification to fail for G1.
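       //
       // For example (a sketch, not from the source): a discovered list
       // r1 -> r2 -> r3 ends up with
       //   r1.next = r1, r2.next = r2, r3.next = r3    (self-looped, i.e. inactive)
       //   r1.discovered = r2, r2.discovered = r3,
       //   r3.discovered = <old pending-list head>     (list prepended to pending)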
314 log_develop_trace(gc, ref)("ReferenceProcessor::enqueue_discovered_reflist list " INTPTR_FORMAT, p2i(&refs_list));
315
 328     // Self-loop next, so as to make the Ref inactive.
329 java_lang_ref_Reference::set_next_raw(obj, obj);
330 if (next_d != obj) {
331 oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), next_d);
332 } else {
333 // This is the last object.
334 // Swap refs_list into pending list and set obj's
335 // discovered to what we read from the pending list.
336 oop old = Universe::swap_reference_pending_list(refs_list.head());
337 java_lang_ref_Reference::set_discovered_raw(obj, old); // old may be NULL
338 oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), old);
339 }
340 }
341 }
342
343 // Parallel enqueue task
344 class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask {
345 public:
346 RefProcEnqueueTask(ReferenceProcessor& ref_processor,
347 DiscoveredList discovered_refs[],
348 int n_queues)
349 : EnqueueTask(ref_processor, discovered_refs, n_queues)
350 { }
351
352 virtual void work(unsigned int work_id) {
353 assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds");
354 // Simplest first cut: static partitioning.
355 int index = work_id;
356 // The increment on "index" must correspond to the maximum number of queues
357 // (n_queues) with which that ReferenceProcessor was created. That
358 // is because of the "clever" way the discovered references lists were
359 // allocated and are indexed into.
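       // (Sketch of the assumed layout: _refs_lists is a flattened
       // [subclass][queue] array, i.e. list (s, q) lives at index
       // s * _n_queues + q, so this worker visits entry work_id of each
       // of the number_of_subclasses_of_ref() blocks.)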
360 assert(_n_queues == (int) _ref_processor.max_num_q(), "Different number not expected");
361 for (int j = 0;
362 j < ReferenceProcessor::number_of_subclasses_of_ref();
363 j++, index += _n_queues) {
364 _ref_processor.enqueue_discovered_reflist(_refs_lists[index]);
365 _refs_lists[index].set_head(NULL);
366 _refs_lists[index].set_length(0);
367 }
368 }
369 };
370
371 // Enqueue references that are not made active again
372 void ReferenceProcessor::enqueue_discovered_reflists(AbstractRefProcTaskExecutor* task_executor) {
373 if (_processing_is_mt && task_executor != NULL) {
374 // Parallel code
375 RefProcEnqueueTask tsk(*this, _discovered_refs, _max_num_q);
376 task_executor->execute(tsk);
377 } else {
 378     // Serial code: enqueue each discovered list in turn
379 for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
380 enqueue_discovered_reflist(_discovered_refs[i]);
381 _discovered_refs[i].set_head(NULL);
382 _discovered_refs[i].set_length(0);
383 }
384 }
385 }
386
387 void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
388 _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
389 oop discovered = java_lang_ref_Reference::discovered(_ref);
390 assert(_discovered_addr && discovered->is_oop_or_null(),
391 "Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered));
392 _next = discovered;
393 _referent_addr = java_lang_ref_Reference::referent_addr(_ref);
394 _referent = java_lang_ref_Reference::referent(_ref);
395 assert(Universe::heap()->is_in_reserved_or_null(_referent),
452 while (iter.has_next()) {
453 iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
454 bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive();
455 if (referent_is_dead &&
456 !policy->should_clear_reference(iter.obj(), _soft_ref_timestamp_clock)) {
 457       log_develop_trace(gc, ref)("Dropping reference (" INTPTR_FORMAT ": %s) by policy",
458 p2i(iter.obj()), iter.obj()->klass()->internal_name());
459 // Remove Reference object from list
460 iter.remove();
461 // keep the referent around
462 iter.make_referent_alive();
463 iter.move_to_next();
464 } else {
465 iter.next();
466 }
467 }
468 // Close the reachable set
469 complete_gc->do_void();
470 log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " dead Refs out of " SIZE_FORMAT " discovered Refs by policy, from list " INTPTR_FORMAT,
471 iter.removed(), iter.processed(), p2i(&refs_list));
472 }
473
474 // Traverse the list and remove any Refs that are not active, or
475 // whose referents are either alive or NULL.
476 void
477 ReferenceProcessor::pp2_work(DiscoveredList& refs_list,
478 BoolObjectClosure* is_alive,
479 OopClosure* keep_alive) {
480 assert(discovery_is_atomic(), "Error");
481 DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
482 while (iter.has_next()) {
483 iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
484 DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
485 assert(next == NULL, "Should not discover inactive Reference");
486 if (iter.is_referent_alive()) {
487 log_develop_trace(gc, ref)("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",
488 p2i(iter.obj()), iter.obj()->klass()->internal_name());
489 // The referent is reachable after all.
490 // Remove Reference object from list.
491 iter.remove();
492 // Update the referent pointer as necessary: Note that this
581 oop next = refs_list.head();
582 while (next != obj) {
583 obj = next;
584 next = java_lang_ref_Reference::discovered(obj);
585 java_lang_ref_Reference::set_discovered_raw(obj, NULL);
586 }
587 refs_list.set_head(NULL);
588 refs_list.set_length(0);
589 }
590
591 void ReferenceProcessor::abandon_partial_discovery() {
592 // loop over the lists
593 for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
594 if ((i % _max_num_q) == 0) {
595 log_develop_trace(gc, ref)("Abandoning %s discovered list", list_name(i));
596 }
597 clear_discovered_references(_discovered_refs[i]);
598 }
599 }
600
601 class RefProcPhase1Task: public AbstractRefProcTaskExecutor::ProcessTask {
602 public:
603 RefProcPhase1Task(ReferenceProcessor& ref_processor,
604 DiscoveredList refs_lists[],
605 ReferencePolicy* policy,
606 bool marks_oops_alive)
607 : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
608 _policy(policy)
609 { }
610 virtual void work(unsigned int i, BoolObjectClosure& is_alive,
611 OopClosure& keep_alive,
612 VoidClosure& complete_gc)
613 {
614 _ref_processor.process_phase1(_refs_lists[i], _policy,
615 &is_alive, &keep_alive, &complete_gc);
616 }
617 private:
618 ReferencePolicy* _policy;
619 };
620
621 class RefProcPhase2Task: public AbstractRefProcTaskExecutor::ProcessTask {
622 public:
623 RefProcPhase2Task(ReferenceProcessor& ref_processor,
624 DiscoveredList refs_lists[],
625 bool marks_oops_alive)
626 : ProcessTask(ref_processor, refs_lists, marks_oops_alive)
627 { }
628 virtual void work(unsigned int i, BoolObjectClosure& is_alive,
629 OopClosure& keep_alive,
630 VoidClosure& complete_gc)
631 {
632 _ref_processor.process_phase2(_refs_lists[i],
633 &is_alive, &keep_alive, &complete_gc);
634 }
635 };
636
637 class RefProcPhase3Task: public AbstractRefProcTaskExecutor::ProcessTask {
638 public:
639 RefProcPhase3Task(ReferenceProcessor& ref_processor,
640 DiscoveredList refs_lists[],
641 bool clear_referent,
642 bool marks_oops_alive)
643 : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
644 _clear_referent(clear_referent)
645 { }
646 virtual void work(unsigned int i, BoolObjectClosure& is_alive,
647 OopClosure& keep_alive,
648 VoidClosure& complete_gc)
649 {
650 _ref_processor.process_phase3(_refs_lists[i], _clear_referent,
651 &is_alive, &keep_alive, &complete_gc);
652 }
653 private:
654 bool _clear_referent;
655 };
656
657 #ifndef PRODUCT
658 void ReferenceProcessor::log_reflist_counts(DiscoveredList ref_lists[], uint active_length, size_t total_refs) {
659 if (!log_is_enabled(Trace, gc, ref)) {
660 return;
661 }
662
663 stringStream st;
664 for (uint i = 0; i < active_length; ++i) {
665 st.print(SIZE_FORMAT " ", ref_lists[i].length());
666 }
667 log_develop_trace(gc, ref)("%s= " SIZE_FORMAT, st.as_string(), total_refs);
668 #ifdef ASSERT
669 for (uint i = active_length; i < _max_num_q; i++) {
759 }
760 log_reflist_counts(ref_lists, _num_q, balanced_total_refs);
761 assert(total_refs == balanced_total_refs, "Balancing was incomplete");
762 #endif
763 }
764
765 void ReferenceProcessor::balance_all_queues() {
766 balance_queues(_discoveredSoftRefs);
767 balance_queues(_discoveredWeakRefs);
768 balance_queues(_discoveredFinalRefs);
769 balance_queues(_discoveredPhantomRefs);
770 }
771
772 void ReferenceProcessor::process_discovered_reflist(
773 DiscoveredList refs_lists[],
774 ReferencePolicy* policy,
775 bool clear_referent,
776 BoolObjectClosure* is_alive,
777 OopClosure* keep_alive,
778 VoidClosure* complete_gc,
779 AbstractRefProcTaskExecutor* task_executor)
780 {
781 bool mt_processing = task_executor != NULL && _processing_is_mt;
782 // If discovery used MT and a dynamic number of GC threads, then
783 // the queues must be balanced for correctness if fewer than the
 784   // maximum number of queues were used. The number of queues used
 785   // during discovery may differ from the number to be used
 786   // for processing, so don't depend on _num_q < _max_num_q as part
787 // of the test.
788 bool must_balance = _discovery_is_mt;
789
790 if ((mt_processing && ParallelRefProcBalancingEnabled) ||
791 must_balance) {
792 balance_queues(refs_lists);
793 }
794
795 // Phase 1 (soft refs only):
796 // . Traverse the list and remove any SoftReferences whose
797 // referents are not alive, but that should be kept alive for
798 // policy reasons. Keep alive the transitive closure of all
799 // such referents.
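   // (The policy decides via should_clear_reference(ref, _soft_ref_timestamp_clock);
   // e.g. the LRU-based policies in referencePolicy.hpp compare the soft
   // reference's timestamp against the clock.)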
800 if (policy != NULL) {
801 if (mt_processing) {
802 RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/);
803 task_executor->execute(phase1);
804 } else {
805 for (uint i = 0; i < _max_num_q; i++) {
806 process_phase1(refs_lists[i], policy,
807 is_alive, keep_alive, complete_gc);
808 }
809 }
810 } else { // policy == NULL
811 assert(refs_lists != _discoveredSoftRefs,
812 "Policy must be specified for soft references.");
813 }
814
815 // Phase 2:
816 // . Traverse the list and remove any refs whose referents are alive.
817 if (mt_processing) {
818 RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/);
819 task_executor->execute(phase2);
820 } else {
821 for (uint i = 0; i < _max_num_q; i++) {
822 process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc);
823 }
824 }
825
826 // Phase 3:
827 // . Traverse the list and process referents as appropriate.
828 if (mt_processing) {
829 RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/);
830 task_executor->execute(phase3);
831 } else {
832 for (uint i = 0; i < _max_num_q; i++) {
833 process_phase3(refs_lists[i], clear_referent,
834 is_alive, keep_alive, complete_gc);
835 }
836 }
837 }
838
839 inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) {
840 uint id = 0;
841 // Determine the queue index to use for this object.
842 if (_discovery_is_mt) {
843 // During a multi-threaded discovery phase,
844 // each thread saves to its "own" list.
845 Thread* thr = Thread::current();
846 id = thr->as_Worker_thread()->id();
847 } else {
 848     // During single-threaded discovery, we save to each of
 849     // the lists in round-robin fashion.
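     // (For example, with four active queues successive discoveries go to
     // lists 0, 1, 2, 3, 0, 1, ...; next_id() is assumed to wrap around.)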
850 if (_processing_is_mt) {
851 id = next_id();
852 }
853 }
 854   assert(id < _max_num_q, "Id is out of bounds, id: %u, max id: %u", id, _max_num_q);
855
856 // Get the discovered queue to which we will add
1179 log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " Refs out of " SIZE_FORMAT " Refs in discovered list " INTPTR_FORMAT,
1180 iter.removed(), iter.processed(), p2i(&refs_list));
1181 }
1182 )
1183 }
1184
1185 const char* ReferenceProcessor::list_name(uint i) {
1186   assert(i < _max_num_q * number_of_subclasses_of_ref(),
1187 "Out of bounds index");
1188
1189 int j = i / _max_num_q;
1190 switch (j) {
1191 case 0: return "SoftRef";
1192 case 1: return "WeakRef";
1193 case 2: return "FinalRef";
1194 case 3: return "PhantomRef";
1195 }
1196 ShouldNotReachHere();
1197 return NULL;
1198 }
1199
1 /*
2 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
162
163 NOT_PRODUCT(
164 if (now < _soft_ref_timestamp_clock) {
165 log_warning(gc)("time warp: " JLONG_FORMAT " to " JLONG_FORMAT,
166 _soft_ref_timestamp_clock, now);
167 }
168 )
169 // The values of now and _soft_ref_timestamp_clock are set using
170 // javaTimeNanos(), which is guaranteed to be monotonically
171 // non-decreasing provided the underlying platform provides such
 172   // a time source (and it is bug-free).
173 // In product mode, however, protect ourselves from non-monotonicity.
174 if (now > _soft_ref_timestamp_clock) {
175 _soft_ref_timestamp_clock = now;
176 java_lang_ref_SoftReference::set_clock(now);
177 }
178 // Else leave clock stalled at its old value until time progresses
 179   // past the clock value.
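   // (For example, if a backwards jump leaves now == 90 while the clock
   // reads 100, the clock stays at 100 until now exceeds it.)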
180 }
181
182 size_t ReferenceProcessor::total_count(DiscoveredList lists[]) const {
183 size_t total = 0;
184 for (uint i = 0; i < _max_num_q; ++i) {
185 total += lists[i].length();
186 }
187 return total;
188 }
189
190 ReferenceProcessorStats ReferenceProcessor::process_discovered_references(
191 BoolObjectClosure* is_alive,
192 OopClosure* keep_alive,
193 VoidClosure* complete_gc,
194 AbstractRefProcTaskExecutor* task_executor,
195 ReferenceProcessorPhaseTimes* phase_times) {
196 double start_time = os::elapsedTime();
197
 198   assert(!enqueuing_is_done(), "If here, enqueuing should not be complete");
199 // Stop treating discovered references specially.
200 disable_discovery();
201
202 // If discovery was concurrent, someone could have modified
203 // the value of the static field in the j.l.r.SoftReference
204 // class that holds the soft reference timestamp clock using
205 // reflection or Unsafe between when discovery was enabled and
206 // now. Unconditionally update the static field in ReferenceProcessor
207 // here so that we use the new value during processing of the
208 // discovered soft refs.
209
210 _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock();
211
212 ReferenceProcessorStats stats(total_count(_discoveredSoftRefs),
213 total_count(_discoveredWeakRefs),
214 total_count(_discoveredFinalRefs),
215 total_count(_discoveredPhantomRefs));
216
217 // Soft references
218 {
219 RefProcPhaseTimesTracker tt(REF_SOFT, phase_times, this);
220 process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true,
221 is_alive, keep_alive, complete_gc, task_executor, phase_times);
222 }
223
224 update_soft_ref_master_clock();
225
226 // Weak references
227 {
228 RefProcPhaseTimesTracker tt(REF_WEAK, phase_times, this);
229 process_discovered_reflist(_discoveredWeakRefs, NULL, true,
230 is_alive, keep_alive, complete_gc, task_executor, phase_times);
231 }
232
233 // Final references
234 {
235 RefProcPhaseTimesTracker tt(REF_FINAL, phase_times, this);
236 process_discovered_reflist(_discoveredFinalRefs, NULL, false,
237 is_alive, keep_alive, complete_gc, task_executor, phase_times);
238 }
239
240 // Phantom references
241 {
242 RefProcPhaseTimesTracker tt(REF_PHANTOM, phase_times, this);
243 process_discovered_reflist(_discoveredPhantomRefs, NULL, true,
244 is_alive, keep_alive, complete_gc, task_executor, phase_times);
245 }
246
247 // Weak global JNI references. It would make more sense (semantically) to
248 // traverse these simultaneously with the regular weak references above, but
 249   // that is not what the JDK 1.2 specification requires; see bug #4126360. Native code can
250 // thus use JNI weak references to circumvent the phantom references and
251 // resurrect a "post-mortem" object.
252 {
253 GCTraceTime(Debug, gc, ref) tt("JNI Weak Reference", phase_times->gc_timer());
254 if (task_executor != NULL) {
255 task_executor->set_single_threaded_mode();
256 }
257 process_phaseJNI(is_alive, keep_alive, complete_gc);
258 }
259
260 phase_times->set_total_time_ms((os::elapsedTime() - start_time) * 1000);
261
262 log_develop_trace(gc, ref)("JNI Weak Reference count: " SIZE_FORMAT, count_jni_refs());
263
264 return stats;
265 }
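// A typical caller now threads the same ReferenceProcessorPhaseTimes through
// both steps (a sketch; how phase_times is constructed is collector-specific
// and the names here are assumptions):
//
//   ReferenceProcessorStats stats =
//     rp->process_discovered_references(&is_alive, &keep_alive, &complete_gc,
//                                       NULL /* serial */, &phase_times);
//   rp->enqueue_discovered_references(NULL /* serial */, &phase_times);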
266
267 #ifndef PRODUCT
 268 // Count the JNI weak global handles.
269 size_t ReferenceProcessor::count_jni_refs() {
270 class CountHandleClosure: public OopClosure {
271 private:
272 size_t _count;
273 public:
274 CountHandleClosure(): _count(0) {}
275 void do_oop(oop* unused) { _count++; }
276 void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
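         // (narrowOop is unreachable here: JNI handles hold full-width oops,
         // never compressed pointers.)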
277 size_t count() { return _count; }
278 };
279 CountHandleClosure global_handle_count;
280 JNIHandles::weak_oops_do(&global_handle_count);
281 return global_handle_count.count();
282 }
283 #endif
284
285 void ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive,
286 OopClosure* keep_alive,
287 VoidClosure* complete_gc) {
288 JNIHandles::weak_oops_do(is_alive, keep_alive);
289 complete_gc->do_void();
290 }
291
292 void ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor,
293 ReferenceProcessorPhaseTimes* phase_times) {
294 // Enqueue references that are not made active again, and
295 // clear the decks for the next collection (cycle).
296 enqueue_discovered_reflists(task_executor, phase_times);
297
298 // Stop treating discovered references specially.
299 disable_discovery();
300 }
301
302 void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list) {
303 // Given a list of refs linked through the "discovered" field
304 // (java.lang.ref.Reference.discovered), self-loop their "next" field
305 // thus distinguishing them from active References, then
306 // prepend them to the pending list.
307 //
308 // The Java threads will see the Reference objects linked together through
309 // the discovered field. Instead of trying to do the write barrier updates
 310   // in all the places where the reference processor manipulates the discovered
 311   // field, we do the barrier here, where we iterate through all linked
 312   // Reference objects anyway. Note that it is important not to dirty any
313 // cards during reference processing since this will cause card table
314 // verification to fail for G1.
315 log_develop_trace(gc, ref)("ReferenceProcessor::enqueue_discovered_reflist list " INTPTR_FORMAT, p2i(&refs_list));
316
 329     // Self-loop next, so as to make the Ref inactive.
330 java_lang_ref_Reference::set_next_raw(obj, obj);
331 if (next_d != obj) {
332 oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), next_d);
333 } else {
334 // This is the last object.
335 // Swap refs_list into pending list and set obj's
336 // discovered to what we read from the pending list.
337 oop old = Universe::swap_reference_pending_list(refs_list.head());
338 java_lang_ref_Reference::set_discovered_raw(obj, old); // old may be NULL
339 oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), old);
340 }
341 }
342 }
343
344 // Parallel enqueue task
345 class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask {
346 public:
347 RefProcEnqueueTask(ReferenceProcessor& ref_processor,
348 DiscoveredList discovered_refs[],
349 int n_queues,
350 ReferenceProcessorPhaseTimes* phase_times)
351 : EnqueueTask(ref_processor, discovered_refs, n_queues, phase_times)
352 { }
353
354 virtual void work(unsigned int work_id) {
355 RefProcWorkerTimeTracker tt(ReferenceProcessorPhaseTimes::RefEnqueue, _phase_times, work_id);
356
357 assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds");
358 // Simplest first cut: static partitioning.
359 int index = work_id;
360 // The increment on "index" must correspond to the maximum number of queues
361 // (n_queues) with which that ReferenceProcessor was created. That
362 // is because of the "clever" way the discovered references lists were
363 // allocated and are indexed into.
364 assert(_n_queues == (int) _ref_processor.max_num_q(), "Different number not expected");
365 for (int j = 0;
366 j < ReferenceProcessor::number_of_subclasses_of_ref();
367 j++, index += _n_queues) {
368 _ref_processor.enqueue_discovered_reflist(_refs_lists[index]);
369 _refs_lists[index].set_head(NULL);
370 _refs_lists[index].set_length(0);
371 }
372 }
373 };
374
375 // Enqueue references that are not made active again
376 void ReferenceProcessor::enqueue_discovered_reflists(AbstractRefProcTaskExecutor* task_executor,
377 ReferenceProcessorPhaseTimes* phase_times) {
378
379 ReferenceProcessorStats stats(total_count(_discoveredSoftRefs),
380 total_count(_discoveredWeakRefs),
381 total_count(_discoveredFinalRefs),
382 total_count(_discoveredPhantomRefs));
383
384 RefProcEnqueueTimeTracker tt(phase_times, stats);
385
386 if (_processing_is_mt && task_executor != NULL) {
387 // Parallel code
388 RefProcEnqueueTask tsk(*this, _discovered_refs, _max_num_q, phase_times);
389 task_executor->execute(tsk);
390 } else {
 391     // Serial code: enqueue each discovered list in turn
392 for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
393 enqueue_discovered_reflist(_discovered_refs[i]);
394 _discovered_refs[i].set_head(NULL);
395 _discovered_refs[i].set_length(0);
396 }
397 }
398 }
399
400 void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
401 _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
402 oop discovered = java_lang_ref_Reference::discovered(_ref);
403 assert(_discovered_addr && discovered->is_oop_or_null(),
404 "Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered));
405 _next = discovered;
406 _referent_addr = java_lang_ref_Reference::referent_addr(_ref);
407 _referent = java_lang_ref_Reference::referent(_ref);
408 assert(Universe::heap()->is_in_reserved_or_null(_referent),
465 while (iter.has_next()) {
466 iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
467 bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive();
468 if (referent_is_dead &&
469 !policy->should_clear_reference(iter.obj(), _soft_ref_timestamp_clock)) {
 470       log_develop_trace(gc, ref)("Dropping reference (" INTPTR_FORMAT ": %s) by policy",
471 p2i(iter.obj()), iter.obj()->klass()->internal_name());
472 // Remove Reference object from list
473 iter.remove();
474 // keep the referent around
475 iter.make_referent_alive();
476 iter.move_to_next();
477 } else {
478 iter.next();
479 }
480 }
481 // Close the reachable set
482 complete_gc->do_void();
483 log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " dead Refs out of " SIZE_FORMAT " discovered Refs by policy, from list " INTPTR_FORMAT,
484 iter.removed(), iter.processed(), p2i(&refs_list));
485 }
486
487 // Traverse the list and remove any Refs that are not active, or
488 // whose referents are either alive or NULL.
489 void
490 ReferenceProcessor::pp2_work(DiscoveredList& refs_list,
491 BoolObjectClosure* is_alive,
492 OopClosure* keep_alive) {
493 assert(discovery_is_atomic(), "Error");
494 DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
495 while (iter.has_next()) {
496 iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
497 DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
498 assert(next == NULL, "Should not discover inactive Reference");
499 if (iter.is_referent_alive()) {
500 log_develop_trace(gc, ref)("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",
501 p2i(iter.obj()), iter.obj()->klass()->internal_name());
502 // The referent is reachable after all.
503 // Remove Reference object from list.
504 iter.remove();
505 // Update the referent pointer as necessary: Note that this
594 oop next = refs_list.head();
595 while (next != obj) {
596 obj = next;
597 next = java_lang_ref_Reference::discovered(obj);
598 java_lang_ref_Reference::set_discovered_raw(obj, NULL);
599 }
600 refs_list.set_head(NULL);
601 refs_list.set_length(0);
602 }
603
604 void ReferenceProcessor::abandon_partial_discovery() {
605 // loop over the lists
606 for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
607 if ((i % _max_num_q) == 0) {
608 log_develop_trace(gc, ref)("Abandoning %s discovered list", list_name(i));
609 }
610 clear_discovered_references(_discovered_refs[i]);
611 }
612 }
613
614 size_t ReferenceProcessor::total_reference_count(ReferenceType type) const {
615 DiscoveredList* list = NULL;
616
617 switch (type) {
618 case REF_SOFT:
619 list = _discoveredSoftRefs;
620 break;
621 case REF_WEAK:
622 list = _discoveredWeakRefs;
623 break;
624 case REF_FINAL:
625 list = _discoveredFinalRefs;
626 break;
627 case REF_PHANTOM:
628 list = _discoveredPhantomRefs;
629 break;
630 case REF_OTHER:
631 case REF_NONE:
632 default:
633 ShouldNotReachHere();
634 }
635 return total_count(list);
636 }
637
638 class RefProcPhase1Task: public AbstractRefProcTaskExecutor::ProcessTask {
639 public:
640 RefProcPhase1Task(ReferenceProcessor& ref_processor,
641 DiscoveredList refs_lists[],
642 ReferencePolicy* policy,
643 bool marks_oops_alive,
644 ReferenceProcessorPhaseTimes* phase_times)
645 : ProcessTask(ref_processor, refs_lists, marks_oops_alive, phase_times),
646 _policy(policy)
647 { }
648 virtual void work(unsigned int i, BoolObjectClosure& is_alive,
649 OopClosure& keep_alive,
650 VoidClosure& complete_gc)
651 {
652 RefProcWorkerTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase1, _phase_times, i);
653
654 _ref_processor.process_phase1(_refs_lists[i], _policy,
655 &is_alive, &keep_alive, &complete_gc);
656 }
657 private:
658 ReferencePolicy* _policy;
659 };
660
661 class RefProcPhase2Task: public AbstractRefProcTaskExecutor::ProcessTask {
662 public:
663 RefProcPhase2Task(ReferenceProcessor& ref_processor,
664 DiscoveredList refs_lists[],
665 bool marks_oops_alive,
666 ReferenceProcessorPhaseTimes* phase_times)
667 : ProcessTask(ref_processor, refs_lists, marks_oops_alive, phase_times)
668 { }
669 virtual void work(unsigned int i, BoolObjectClosure& is_alive,
670 OopClosure& keep_alive,
671 VoidClosure& complete_gc)
672 {
673 RefProcWorkerTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase2, _phase_times, i);
674
675 _ref_processor.process_phase2(_refs_lists[i],
676 &is_alive, &keep_alive, &complete_gc);
677 }
678 };
679
680 class RefProcPhase3Task: public AbstractRefProcTaskExecutor::ProcessTask {
681 public:
682 RefProcPhase3Task(ReferenceProcessor& ref_processor,
683 DiscoveredList refs_lists[],
684 bool clear_referent,
685 bool marks_oops_alive,
686 ReferenceProcessorPhaseTimes* phase_times)
687 : ProcessTask(ref_processor, refs_lists, marks_oops_alive, phase_times),
688 _clear_referent(clear_referent)
689 { }
690 virtual void work(unsigned int i, BoolObjectClosure& is_alive,
691 OopClosure& keep_alive,
692 VoidClosure& complete_gc)
693 {
694 RefProcWorkerTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase3, _phase_times, i);
695
696 _ref_processor.process_phase3(_refs_lists[i], _clear_referent,
697 &is_alive, &keep_alive, &complete_gc);
698 }
699 private:
700 bool _clear_referent;
701 };
702
703 #ifndef PRODUCT
704 void ReferenceProcessor::log_reflist_counts(DiscoveredList ref_lists[], uint active_length, size_t total_refs) {
705 if (!log_is_enabled(Trace, gc, ref)) {
706 return;
707 }
708
709 stringStream st;
710 for (uint i = 0; i < active_length; ++i) {
711 st.print(SIZE_FORMAT " ", ref_lists[i].length());
712 }
713 log_develop_trace(gc, ref)("%s= " SIZE_FORMAT, st.as_string(), total_refs);
714 #ifdef ASSERT
715 for (uint i = active_length; i < _max_num_q; i++) {
805 }
806 log_reflist_counts(ref_lists, _num_q, balanced_total_refs);
807 assert(total_refs == balanced_total_refs, "Balancing was incomplete");
808 #endif
809 }
810
811 void ReferenceProcessor::balance_all_queues() {
812 balance_queues(_discoveredSoftRefs);
813 balance_queues(_discoveredWeakRefs);
814 balance_queues(_discoveredFinalRefs);
815 balance_queues(_discoveredPhantomRefs);
816 }
817
818 void ReferenceProcessor::process_discovered_reflist(
819 DiscoveredList refs_lists[],
820 ReferencePolicy* policy,
821 bool clear_referent,
822 BoolObjectClosure* is_alive,
823 OopClosure* keep_alive,
824 VoidClosure* complete_gc,
825 AbstractRefProcTaskExecutor* task_executor,
826 ReferenceProcessorPhaseTimes* phase_times)
827 {
828 bool mt_processing = task_executor != NULL && _processing_is_mt;
829
830 phase_times->set_processing_is_mt(mt_processing);
831
832 // If discovery used MT and a dynamic number of GC threads, then
833 // the queues must be balanced for correctness if fewer than the
 834   // maximum number of queues were used. The number of queues used
 835   // during discovery may differ from the number to be used
 836   // for processing, so don't depend on _num_q < _max_num_q as part
837 // of the test.
838 bool must_balance = _discovery_is_mt;
839
840 if ((mt_processing && ParallelRefProcBalancingEnabled) ||
841 must_balance) {
842 RefProcBalanceQueuesTimeTracker tt(phase_times);
843 balance_queues(refs_lists);
844 }
845
846 // Phase 1 (soft refs only):
847 // . Traverse the list and remove any SoftReferences whose
848 // referents are not alive, but that should be kept alive for
849 // policy reasons. Keep alive the transitive closure of all
850 // such referents.
851 if (policy != NULL) {
852 RefProcParPhaseTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase1, phase_times);
853
854 if (mt_processing) {
855 RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/, phase_times);
856 task_executor->execute(phase1);
857 } else {
858 for (uint i = 0; i < _max_num_q; i++) {
859 process_phase1(refs_lists[i], policy,
860 is_alive, keep_alive, complete_gc);
861 }
862 }
863 } else { // policy == NULL
864 assert(refs_lists != _discoveredSoftRefs,
865 "Policy must be specified for soft references.");
866 }
867
868 // Phase 2:
869 // . Traverse the list and remove any refs whose referents are alive.
870 {
871 RefProcParPhaseTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase2, phase_times);
872
873 if (mt_processing) {
874 RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/, phase_times);
875 task_executor->execute(phase2);
876 } else {
877 for (uint i = 0; i < _max_num_q; i++) {
878 process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc);
879 }
880 }
881 }
882
883 // Phase 3:
884 // . Traverse the list and process referents as appropriate.
885 {
886 RefProcParPhaseTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase3, phase_times);
887
888 if (mt_processing) {
889 RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/, phase_times);
890 task_executor->execute(phase3);
891 } else {
892 for (uint i = 0; i < _max_num_q; i++) {
893 process_phase3(refs_lists[i], clear_referent,
894 is_alive, keep_alive, complete_gc);
895 }
896 }
897 }
898 }
899
900 inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) {
901 uint id = 0;
902 // Determine the queue index to use for this object.
903 if (_discovery_is_mt) {
904 // During a multi-threaded discovery phase,
905 // each thread saves to its "own" list.
906 Thread* thr = Thread::current();
907 id = thr->as_Worker_thread()->id();
908 } else {
 909     // During single-threaded discovery, we save to each of
 910     // the lists in round-robin fashion.
911 if (_processing_is_mt) {
912 id = next_id();
913 }
914 }
 915   assert(id < _max_num_q, "Id is out of bounds, id: %u, max id: %u", id, _max_num_q);
916
917 // Get the discovered queue to which we will add
1240 log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " Refs out of " SIZE_FORMAT " Refs in discovered list " INTPTR_FORMAT,
1241 iter.removed(), iter.processed(), p2i(&refs_list));
1242 }
1243 )
1244 }
1245
1246 const char* ReferenceProcessor::list_name(uint i) {
1247   assert(i < _max_num_q * number_of_subclasses_of_ref(),
1248 "Out of bounds index");
1249
1250 int j = i / _max_num_q;
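   // (For example, with _max_num_q == 4: i in 0..3 -> SoftRef, 4..7 -> WeakRef,
   // 8..11 -> FinalRef, 12..15 -> PhantomRef.)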
1251 switch (j) {
1252 case 0: return "SoftRef";
1253 case 1: return "WeakRef";
1254 case 2: return "FinalRef";
1255 case 3: return "PhantomRef";
1256 }
1257 ShouldNotReachHere();
1258 return NULL;
1259 }