1 /*
2 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
162
163 NOT_PRODUCT(
164 if (now < _soft_ref_timestamp_clock) {
165 log_warning(gc)("time warp: " JLONG_FORMAT " to " JLONG_FORMAT,
166 _soft_ref_timestamp_clock, now);
167 }
168 )
169 // The values of now and _soft_ref_timestamp_clock are set using
170 // javaTimeNanos(), which is guaranteed to be monotonically
171 // non-decreasing provided the underlying platform provides such
172 // a time source (and it is bug free).
173 // In product mode, however, protect ourselves from non-monotonicity.
174 if (now > _soft_ref_timestamp_clock) {
175 _soft_ref_timestamp_clock = now;
176 java_lang_ref_SoftReference::set_clock(now);
177 }
178 // Else leave clock stalled at its old value until time progresses
179   // past the clock value.
180 }
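
// A minimal editorial sketch of how the clock maintained above is consumed.
// The LRU soft-reference policies (referencePolicy.cpp) compare the master
// clock against each SoftReference's last-access timestamp and clear the
// reference once it has been idle longer than some interval; the names below
// are illustrative stand-ins, not the real fields.

static bool toy_should_clear_soft_ref(jlong timestamp_clock,  // master clock, ms
                                      jlong ref_timestamp,    // last access, ms
                                      jlong max_interval) {   // policy-derived budget
  // Idle time is measured against the monotonic master clock updated above.
  return (timestamp_clock - ref_timestamp) > max_interval;
}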
181
182 size_t ReferenceProcessor::total_count(DiscoveredList lists[]) {
183 size_t total = 0;
184 for (uint i = 0; i < _max_num_q; ++i) {
185 total += lists[i].length();
186 }
187 return total;
188 }
189
190 ReferenceProcessorStats ReferenceProcessor::process_discovered_references(
191 BoolObjectClosure* is_alive,
192 OopClosure* keep_alive,
193 VoidClosure* complete_gc,
194 AbstractRefProcTaskExecutor* task_executor,
195 GCTimer* gc_timer) {
196
197   assert(!enqueuing_is_done(), "Enqueuing should not be complete at this point");
198 // Stop treating discovered references specially.
199 disable_discovery();
200
201 // If discovery was concurrent, someone could have modified
202 // the value of the static field in the j.l.r.SoftReference
203 // class that holds the soft reference timestamp clock using
204 // reflection or Unsafe between when discovery was enabled and
205 // now. Unconditionally update the static field in ReferenceProcessor
206 // here so that we use the new value during processing of the
207 // discovered soft refs.
208
209 _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock();
210
211 ReferenceProcessorStats stats(
212 total_count(_discoveredSoftRefs),
213 total_count(_discoveredWeakRefs),
214 total_count(_discoveredFinalRefs),
215 total_count(_discoveredPhantomRefs));
216
217 // Soft references
218 {
219 GCTraceTime(Debug, gc, ref) tt("SoftReference", gc_timer);
220 process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true,
221 is_alive, keep_alive, complete_gc, task_executor);
222 }
223
224 update_soft_ref_master_clock();
225
226 // Weak references
227 {
228 GCTraceTime(Debug, gc, ref) tt("WeakReference", gc_timer);
229 process_discovered_reflist(_discoveredWeakRefs, NULL, true,
230 is_alive, keep_alive, complete_gc, task_executor);
231 }
232
233 // Final references
234 {
235 GCTraceTime(Debug, gc, ref) tt("FinalReference", gc_timer);
236 process_discovered_reflist(_discoveredFinalRefs, NULL, false,
237 is_alive, keep_alive, complete_gc, task_executor);
238 }
239
240 // Phantom references
241 {
242 GCTraceTime(Debug, gc, ref) tt("PhantomReference", gc_timer);
243 process_discovered_reflist(_discoveredPhantomRefs, NULL, true,
244 is_alive, keep_alive, complete_gc, task_executor);
245 }
246
247 // Weak global JNI references. It would make more sense (semantically) to
248 // traverse these simultaneously with the regular weak references above, but
249   // that is not what the JDK 1.2 specification requires. See #4126360. Native code can
250 // thus use JNI weak references to circumvent the phantom references and
251   // resurrect a "post-mortem" object; a sketch of this follows the function below.
252 {
253 GCTraceTime(Debug, gc, ref) tt("JNI Weak Reference", gc_timer);
254 if (task_executor != NULL) {
255 task_executor->set_single_threaded_mode();
256 }
257 process_phaseJNI(is_alive, keep_alive, complete_gc);
258 }
259
260 log_debug(gc, ref)("Ref Counts: Soft: " SIZE_FORMAT " Weak: " SIZE_FORMAT " Final: " SIZE_FORMAT " Phantom: " SIZE_FORMAT,
261 stats.soft_count(), stats.weak_count(), stats.final_count(), stats.phantom_count());
262 log_develop_trace(gc, ref)("JNI Weak Reference count: " SIZE_FORMAT, count_jni_refs());
263
264 return stats;
265 }
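
// Editorial sketch of the JNI circumvention described in the comment above,
// using only standard JNI calls: a weak global reference does not keep its
// referent alive, yet native code can still promote it to a strong local
// reference until the weak reference is actually cleared.

static jweak toy_cache_weak(JNIEnv* env, jobject obj) {
  return env->NewWeakGlobalRef(obj);   // does not prevent collection of obj
}

static jobject toy_try_resurrect(JNIEnv* env, jweak handle) {
  // Returns NULL once the referent has been collected and the handle cleared.
  return env->NewLocalRef(handle);
}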
266
267 #ifndef PRODUCT
268 // Count the number of weak global JNI handles.
269 size_t ReferenceProcessor::count_jni_refs() {
270 class CountHandleClosure: public OopClosure {
271 private:
272 size_t _count;
273 public:
274 CountHandleClosure(): _count(0) {}
275 void do_oop(oop* unused) { _count++; }
276 void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
277 size_t count() { return _count; }
278 };
279 CountHandleClosure global_handle_count;
280 JNIHandles::weak_oops_do(&global_handle_count);
281 return global_handle_count.count();
282 }
283 #endif
284
285 void ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive,
286 OopClosure* keep_alive,
287 VoidClosure* complete_gc) {
288 JNIHandles::weak_oops_do(is_alive, keep_alive);
289 complete_gc->do_void();
290 }
291
292 void ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) {
293 // Enqueue references that are not made active again, and
294 // clear the decks for the next collection (cycle).
295 enqueue_discovered_reflists(task_executor);
296
297 // Stop treating discovered references specially.
298 disable_discovery();
299 }
300
301 void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list) {
302 // Given a list of refs linked through the "discovered" field
303   // (java.lang.ref.Reference.discovered), self-loop their "next" field,
304 // thus distinguishing them from active References, then
305 // prepend them to the pending list.
306 //
307 // The Java threads will see the Reference objects linked together through
308   // the discovered field. Instead of trying to do the write barrier updates
309   // in all the places in the reference processor where we manipulate the
310   // discovered field, we do the barrier here, where we iterate through all
311   // linked Reference objects anyway. Note that it is important not to dirty any
312 // cards during reference processing since this will cause card table
313 // verification to fail for G1.
314 log_develop_trace(gc, ref)("ReferenceProcessor::enqueue_discovered_reflist list " INTPTR_FORMAT, p2i(&refs_list));
315
328 // Self-loop next, so as to make Ref not active.
329 java_lang_ref_Reference::set_next_raw(obj, obj);
330 if (next_d != obj) {
331 oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), next_d);
332 } else {
333 // This is the last object.
334 // Swap refs_list into pending list and set obj's
335 // discovered to what we read from the pending list.
336 oop old = Universe::swap_reference_pending_list(refs_list.head());
337 java_lang_ref_Reference::set_discovered_raw(obj, old); // old may be NULL
338 oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), old);
339 }
340 }
341 }
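
// Editorial sketch of the list surgery above on a toy node type (hypothetical
// names): every node on the discovered chain gets a self-looped "next" field,
// marking it inactive, and the chain is then prepended to the pending list by
// swapping in the new head and hanging the old head off the last node. The
// real code does the head swap atomically and applies write barriers.

struct ToyRef {
  ToyRef* next;        // self-looped => inactive
  ToyRef* discovered;  // links the discovered/pending chain
};

static ToyRef* toy_pending_list = NULL;

static void toy_enqueue(ToyRef* head, ToyRef* tail) {
  // Precondition: head..tail are linked through "discovered"; tail->discovered == NULL.
  for (ToyRef* r = head; r != NULL; r = r->discovered) {
    r->next = r;                        // self-loop: no longer active
  }
  tail->discovered = toy_pending_list;  // old pending list follows the new chain
  toy_pending_list = head;              // non-atomic stand-in for the real swap
}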
342
343 // Parallel enqueue task
344 class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask {
345 public:
346 RefProcEnqueueTask(ReferenceProcessor& ref_processor,
347 DiscoveredList discovered_refs[],
348 int n_queues)
349 : EnqueueTask(ref_processor, discovered_refs, n_queues)
350 { }
351
352 virtual void work(unsigned int work_id) {
353 assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds");
354 // Simplest first cut: static partitioning.
355 int index = work_id;
356 // The increment on "index" must correspond to the maximum number of queues
357 // (n_queues) with which that ReferenceProcessor was created. That
358 // is because of the "clever" way the discovered references lists were
359 // allocated and are indexed into.
360 assert(_n_queues == (int) _ref_processor.max_num_q(), "Different number not expected");
361 for (int j = 0;
362 j < ReferenceProcessor::number_of_subclasses_of_ref();
363 j++, index += _n_queues) {
364 _ref_processor.enqueue_discovered_reflist(_refs_lists[index]);
365 _refs_lists[index].set_head(NULL);
366 _refs_lists[index].set_length(0);
367 }
368 }
369 };
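
// Worked example of the indexing above (editorial): with _n_queues == 4 and
// four Reference subclasses, the lists are laid out subclass-major as
// [Soft 0..3][Weak 0..3][Final 0..3][Phantom 0..3], so worker 1 visits
// indices 1, 5, 9 and 13: one queue per subclass, with stride _n_queues.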
370
371 // Enqueue references that are not made active again
372 void ReferenceProcessor::enqueue_discovered_reflists(AbstractRefProcTaskExecutor* task_executor) {
373 if (_processing_is_mt && task_executor != NULL) {
374 // Parallel code
375 RefProcEnqueueTask tsk(*this, _discovered_refs, _max_num_q);
376 task_executor->execute(tsk);
377 } else {
378     // Serial code: process each of the discovered lists in turn.
379 for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
380 enqueue_discovered_reflist(_discovered_refs[i]);
381 _discovered_refs[i].set_head(NULL);
382 _discovered_refs[i].set_length(0);
383 }
384 }
385 }
386
387 void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
388 _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
389 oop discovered = java_lang_ref_Reference::discovered(_ref);
390 assert(_discovered_addr && discovered->is_oop_or_null(),
391 "Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered));
392 _next = discovered;
393 _referent_addr = java_lang_ref_Reference::referent_addr(_ref);
394 _referent = java_lang_ref_Reference::referent(_ref);
395 assert(Universe::heap()->is_in_reserved_or_null(_referent),
452 while (iter.has_next()) {
453 iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
454 bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive();
455 if (referent_is_dead &&
456 !policy->should_clear_reference(iter.obj(), _soft_ref_timestamp_clock)) {
457 log_develop_trace(gc, ref)("Dropping reference (" INTPTR_FORMAT ": %s" ") by policy",
458 p2i(iter.obj()), iter.obj()->klass()->internal_name());
459 // Remove Reference object from list
460 iter.remove();
461 // keep the referent around
462 iter.make_referent_alive();
463 iter.move_to_next();
464 } else {
465 iter.next();
466 }
467 }
468 // Close the reachable set
469 complete_gc->do_void();
470 log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " dead Refs out of " SIZE_FORMAT " discovered Refs by policy, from list " INTPTR_FORMAT,
471 iter.removed(), iter.processed(), p2i(&refs_list));
472 }
473
474 // Traverse the list and remove any Refs that are not active, or
475 // whose referents are either alive or NULL.
476 void
477 ReferenceProcessor::pp2_work(DiscoveredList& refs_list,
478 BoolObjectClosure* is_alive,
479 OopClosure* keep_alive) {
480 assert(discovery_is_atomic(), "Error");
481 DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
482 while (iter.has_next()) {
483 iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
484 DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
485 assert(next == NULL, "Should not discover inactive Reference");
486 if (iter.is_referent_alive()) {
487 log_develop_trace(gc, ref)("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",
488 p2i(iter.obj()), iter.obj()->klass()->internal_name());
489 // The referent is reachable after all.
490 // Remove Reference object from list.
491 iter.remove();
492 // Update the referent pointer as necessary: Note that this
581 oop next = refs_list.head();
582 while (next != obj) {
583 obj = next;
584 next = java_lang_ref_Reference::discovered(obj);
585 java_lang_ref_Reference::set_discovered_raw(obj, NULL);
586 }
587 refs_list.set_head(NULL);
588 refs_list.set_length(0);
589 }
590
591 void ReferenceProcessor::abandon_partial_discovery() {
592 // loop over the lists
593 for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
594 if ((i % _max_num_q) == 0) {
595 log_develop_trace(gc, ref)("Abandoning %s discovered list", list_name(i));
596 }
597 clear_discovered_references(_discovered_refs[i]);
598 }
599 }
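
// Editorial note on the logging cadence above: since the lists are laid out
// subclass-major, (i % _max_num_q) == 0 holds exactly at the first queue of
// each subclass, e.g. at i == 0, 4, 8 and 12 when _max_num_q == 4, so each
// list name is logged once per subclass rather than once per queue.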
600
601 class RefProcPhase1Task: public AbstractRefProcTaskExecutor::ProcessTask {
602 public:
603 RefProcPhase1Task(ReferenceProcessor& ref_processor,
604 DiscoveredList refs_lists[],
605 ReferencePolicy* policy,
606 bool marks_oops_alive)
607 : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
608 _policy(policy)
609 { }
610 virtual void work(unsigned int i, BoolObjectClosure& is_alive,
611 OopClosure& keep_alive,
612 VoidClosure& complete_gc)
613 {
614 _ref_processor.process_phase1(_refs_lists[i], _policy,
615 &is_alive, &keep_alive, &complete_gc);
616 }
617 private:
618 ReferencePolicy* _policy;
619 };
620
621 class RefProcPhase2Task: public AbstractRefProcTaskExecutor::ProcessTask {
622 public:
623 RefProcPhase2Task(ReferenceProcessor& ref_processor,
624 DiscoveredList refs_lists[],
625 bool marks_oops_alive)
626 : ProcessTask(ref_processor, refs_lists, marks_oops_alive)
627 { }
628 virtual void work(unsigned int i, BoolObjectClosure& is_alive,
629 OopClosure& keep_alive,
630 VoidClosure& complete_gc)
631 {
632 _ref_processor.process_phase2(_refs_lists[i],
633 &is_alive, &keep_alive, &complete_gc);
634 }
635 };
636
637 class RefProcPhase3Task: public AbstractRefProcTaskExecutor::ProcessTask {
638 public:
639 RefProcPhase3Task(ReferenceProcessor& ref_processor,
640 DiscoveredList refs_lists[],
641 bool clear_referent,
642 bool marks_oops_alive)
643 : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
644 _clear_referent(clear_referent)
645 { }
646 virtual void work(unsigned int i, BoolObjectClosure& is_alive,
647 OopClosure& keep_alive,
648 VoidClosure& complete_gc)
649 {
650 _ref_processor.process_phase3(_refs_lists[i], _clear_referent,
651 &is_alive, &keep_alive, &complete_gc);
652 }
653 private:
654 bool _clear_referent;
655 };
656
657 #ifndef PRODUCT
658 void ReferenceProcessor::log_reflist_counts(DiscoveredList ref_lists[], uint active_length, size_t total_refs) {
659 if (!log_is_enabled(Trace, gc, ref)) {
660 return;
661 }
662
663 stringStream st;
664 for (uint i = 0; i < active_length; ++i) {
665 st.print(SIZE_FORMAT " ", ref_lists[i].length());
666 }
667 log_develop_trace(gc, ref)("%s= " SIZE_FORMAT, st.as_string(), total_refs);
668 #ifdef ASSERT
669 for (uint i = active_length; i < _max_num_q; i++) {
759 }
760 log_reflist_counts(ref_lists, _num_q, balanced_total_refs);
761 assert(total_refs == balanced_total_refs, "Balancing was incomplete");
762 #endif
763 }
764
765 void ReferenceProcessor::balance_all_queues() {
766 balance_queues(_discoveredSoftRefs);
767 balance_queues(_discoveredWeakRefs);
768 balance_queues(_discoveredFinalRefs);
769 balance_queues(_discoveredPhantomRefs);
770 }
771
772 void ReferenceProcessor::process_discovered_reflist(
773 DiscoveredList refs_lists[],
774 ReferencePolicy* policy,
775 bool clear_referent,
776 BoolObjectClosure* is_alive,
777 OopClosure* keep_alive,
778 VoidClosure* complete_gc,
779 AbstractRefProcTaskExecutor* task_executor)
780 {
781 bool mt_processing = task_executor != NULL && _processing_is_mt;
782 // If discovery used MT and a dynamic number of GC threads, then
783 // the queues must be balanced for correctness if fewer than the
784   // maximum number of queues were used. The number of queues used
785   // during discovery may differ from the number to be used
786   // for processing, so don't depend on _num_q < _max_num_q as part
787   // of the test.
788 bool must_balance = _discovery_is_mt;
789
790 if ((mt_processing && ParallelRefProcBalancingEnabled) ||
791 must_balance) {
792 balance_queues(refs_lists);
793 }
794
795 // Phase 1 (soft refs only):
796 // . Traverse the list and remove any SoftReferences whose
797 // referents are not alive, but that should be kept alive for
798 // policy reasons. Keep alive the transitive closure of all
799 // such referents.
800 if (policy != NULL) {
801 if (mt_processing) {
802 RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/);
803 task_executor->execute(phase1);
804 } else {
805 for (uint i = 0; i < _max_num_q; i++) {
806 process_phase1(refs_lists[i], policy,
807 is_alive, keep_alive, complete_gc);
808 }
809 }
810 } else { // policy == NULL
811 assert(refs_lists != _discoveredSoftRefs,
812 "Policy must be specified for soft references.");
813 }
814
815 // Phase 2:
816 // . Traverse the list and remove any refs whose referents are alive.
817 if (mt_processing) {
818 RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/);
819 task_executor->execute(phase2);
820 } else {
821 for (uint i = 0; i < _max_num_q; i++) {
822 process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc);
823 }
824 }
825
826 // Phase 3:
827 // . Traverse the list and process referents as appropriate.
828 if (mt_processing) {
829 RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/);
830 task_executor->execute(phase3);
831 } else {
832 for (uint i = 0; i < _max_num_q; i++) {
833 process_phase3(refs_lists[i], clear_referent,
834 is_alive, keep_alive, complete_gc);
835 }
836 }
837 }
838
839 inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) {
840 uint id = 0;
841 // Determine the queue index to use for this object.
842 if (_discovery_is_mt) {
843 // During a multi-threaded discovery phase,
844 // each thread saves to its "own" list.
845 Thread* thr = Thread::current();
846 id = thr->as_Worker_thread()->id();
847 } else {
848     // During single-threaded discovery, we save to each of
849     // the lists in round-robin fashion.
850 if (_processing_is_mt) {
851 id = next_id();
852 }
853 }
854   assert(id < _max_num_q, "Id is out-of-bounds (id %u and max id %u)", id, _max_num_q);
855
856 // Get the discovered queue to which we will add
1179 log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " Refs out of " SIZE_FORMAT " Refs in discovered list " INTPTR_FORMAT,
1180 iter.removed(), iter.processed(), p2i(&refs_list));
1181 }
1182 )
1183 }
1184
1185 const char* ReferenceProcessor::list_name(uint i) {
1186 assert(i <= _max_num_q * number_of_subclasses_of_ref(),
1187 "Out of bounds index");
1188
1189 int j = i / _max_num_q;
1190 switch (j) {
1191 case 0: return "SoftRef";
1192 case 1: return "WeakRef";
1193 case 2: return "FinalRef";
1194 case 3: return "PhantomRef";
1195 }
1196 ShouldNotReachHere();
1197 return NULL;
1198 }
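
// Worked example (editorial): with _max_num_q == 4, index i == 9 gives
// j == 9 / 4 == 2, i.e. "FinalRef". The integer division recovers the
// subclass from the subclass-major layout used for the discovered lists.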
1199
|
1 /*
2 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
162
163 NOT_PRODUCT(
164 if (now < _soft_ref_timestamp_clock) {
165 log_warning(gc)("time warp: " JLONG_FORMAT " to " JLONG_FORMAT,
166 _soft_ref_timestamp_clock, now);
167 }
168 )
169 // The values of now and _soft_ref_timestamp_clock are set using
170 // javaTimeNanos(), which is guaranteed to be monotonically
171 // non-decreasing provided the underlying platform provides such
172 // a time source (and it is bug free).
173 // In product mode, however, protect ourselves from non-monotonicity.
174 if (now > _soft_ref_timestamp_clock) {
175 _soft_ref_timestamp_clock = now;
176 java_lang_ref_SoftReference::set_clock(now);
177 }
178 // Else leave clock stalled at its old value until time progresses
179   // past the clock value.
180 }
181
182 size_t ReferenceProcessor::total_count(DiscoveredList lists[]) const {
183 size_t total = 0;
184 for (uint i = 0; i < _max_num_q; ++i) {
185 total += lists[i].length();
186 }
187 return total;
188 }
189
190 ReferenceProcessorStats ReferenceProcessor::process_discovered_references(
191 BoolObjectClosure* is_alive,
192 OopClosure* keep_alive,
193 VoidClosure* complete_gc,
194 AbstractRefProcTaskExecutor* task_executor,
195 ReferenceProcessorPhaseTimes* phase_times) {
196
197 double start_time = os::elapsedTime();
198
199   assert(!enqueuing_is_done(), "Enqueuing should not be complete at this point");
200 // Stop treating discovered references specially.
201 disable_discovery();
202
203 // If discovery was concurrent, someone could have modified
204 // the value of the static field in the j.l.r.SoftReference
205 // class that holds the soft reference timestamp clock using
206 // reflection or Unsafe between when discovery was enabled and
207 // now. Unconditionally update the static field in ReferenceProcessor
208 // here so that we use the new value during processing of the
209 // discovered soft refs.
210
211 _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock();
212
213 ReferenceProcessorStats stats(total_count(_discoveredSoftRefs),
214 total_count(_discoveredWeakRefs),
215 total_count(_discoveredFinalRefs),
216 total_count(_discoveredPhantomRefs));
217
218 // Soft references
219 {
220 RefProcPhaseTimesTracker tt(REF_SOFT, phase_times, this);
221 process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true,
222 is_alive, keep_alive, complete_gc, task_executor, phase_times);
223 }
224
225 update_soft_ref_master_clock();
226
227 // Weak references
228 {
229 RefProcPhaseTimesTracker tt(REF_WEAK, phase_times, this);
230 process_discovered_reflist(_discoveredWeakRefs, NULL, true,
231 is_alive, keep_alive, complete_gc, task_executor, phase_times);
232 }
233
234 // Final references
235 {
236 RefProcPhaseTimesTracker tt(REF_FINAL, phase_times, this);
237 process_discovered_reflist(_discoveredFinalRefs, NULL, false,
238 is_alive, keep_alive, complete_gc, task_executor, phase_times);
239 }
240
241 // Phantom references
242 {
243 RefProcPhaseTimesTracker tt(REF_PHANTOM, phase_times, this);
244 process_discovered_reflist(_discoveredPhantomRefs, NULL, true,
245 is_alive, keep_alive, complete_gc, task_executor, phase_times);
246 }
247
248 // Weak global JNI references. It would make more sense (semantically) to
249 // traverse these simultaneously with the regular weak references above, but
250   // that is not what the JDK 1.2 specification requires. See #4126360. Native code can
251 // thus use JNI weak references to circumvent the phantom references and
252 // resurrect a "post-mortem" object.
253 {
254 GCTraceTime(Debug, gc, ref) tt("JNI Weak Reference", phase_times->gc_timer());
255 if (task_executor != NULL) {
256 task_executor->set_single_threaded_mode();
257 }
258 process_phaseJNI(is_alive, keep_alive, complete_gc);
259 }
260
261 phase_times->set_total_time_ms((os::elapsedTime() - start_time) * 1000);
262
263 log_develop_trace(gc, ref)("JNI Weak Reference count: " SIZE_FORMAT, count_jni_refs());
264
265 return stats;
266 }
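
// Editorial sketch of the scoped-timing pattern introduced in this version.
// RefProcPhaseTimesTracker and friends are the real trackers; the toy RAII
// timer below only illustrates the shape: measure os::elapsedTime() (seconds)
// around a phase and record the delta in milliseconds on destruction.

class ToyPhaseTimer {
  double  _start_sec;
  double* _out_ms;  // where to record the elapsed phase time
 public:
  ToyPhaseTimer(double* out_ms) : _start_sec(os::elapsedTime()), _out_ms(out_ms) {}
  ~ToyPhaseTimer() { *_out_ms = (os::elapsedTime() - _start_sec) * 1000.0; }
};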
267
268 #ifndef PRODUCT
269 // Count the number of weak global JNI handles.
270 size_t ReferenceProcessor::count_jni_refs() {
271 class CountHandleClosure: public OopClosure {
272 private:
273 size_t _count;
274 public:
275 CountHandleClosure(): _count(0) {}
276 void do_oop(oop* unused) { _count++; }
277 void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
278 size_t count() { return _count; }
279 };
280 CountHandleClosure global_handle_count;
281 JNIHandles::weak_oops_do(&global_handle_count);
282 return global_handle_count.count();
283 }
284 #endif
285
286 void ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive,
287 OopClosure* keep_alive,
288 VoidClosure* complete_gc) {
289 JNIHandles::weak_oops_do(is_alive, keep_alive);
290 complete_gc->do_void();
291 }
292
293 void ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor,
294 ReferenceProcessorPhaseTimes* phase_times) {
295 // Enqueue references that are not made active again, and
296 // clear the decks for the next collection (cycle).
297 enqueue_discovered_reflists(task_executor, phase_times);
298
299 // Stop treating discovered references specially.
300 disable_discovery();
301 }
302
303 void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list) {
304 // Given a list of refs linked through the "discovered" field
305   // (java.lang.ref.Reference.discovered), self-loop their "next" field,
306 // thus distinguishing them from active References, then
307 // prepend them to the pending list.
308 //
309 // The Java threads will see the Reference objects linked together through
310   // the discovered field. Instead of trying to do the write barrier updates
311   // in all the places in the reference processor where we manipulate the
312   // discovered field, we do the barrier here, where we iterate through all
313   // linked Reference objects anyway. Note that it is important not to dirty any
314 // cards during reference processing since this will cause card table
315 // verification to fail for G1.
316 log_develop_trace(gc, ref)("ReferenceProcessor::enqueue_discovered_reflist list " INTPTR_FORMAT, p2i(&refs_list));
317
330 // Self-loop next, so as to make Ref not active.
331 java_lang_ref_Reference::set_next_raw(obj, obj);
332 if (next_d != obj) {
333 oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), next_d);
334 } else {
335 // This is the last object.
336 // Swap refs_list into pending list and set obj's
337 // discovered to what we read from the pending list.
338 oop old = Universe::swap_reference_pending_list(refs_list.head());
339 java_lang_ref_Reference::set_discovered_raw(obj, old); // old may be NULL
340 oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), old);
341 }
342 }
343 }
344
345 // Parallel enqueue task
346 class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask {
347 public:
348 RefProcEnqueueTask(ReferenceProcessor& ref_processor,
349 DiscoveredList discovered_refs[],
350 int n_queues,
351 ReferenceProcessorPhaseTimes* phase_times)
352 : EnqueueTask(ref_processor, discovered_refs, n_queues, phase_times)
353 { }
354
355 virtual void work(unsigned int work_id) {
356 RefProcWorkerTimeTracker tt(ReferenceProcessorPhaseTimes::RefEnqueue, _phase_times, work_id);
357
358 assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds");
359 // Simplest first cut: static partitioning.
360 int index = work_id;
361 // The increment on "index" must correspond to the maximum number of queues
362 // (n_queues) with which that ReferenceProcessor was created. That
363 // is because of the "clever" way the discovered references lists were
364 // allocated and are indexed into.
365 assert(_n_queues == (int) _ref_processor.max_num_q(), "Different number not expected");
366 for (int j = 0;
367 j < ReferenceProcessor::number_of_subclasses_of_ref();
368 j++, index += _n_queues) {
369 _ref_processor.enqueue_discovered_reflist(_refs_lists[index]);
370 _refs_lists[index].set_head(NULL);
371 _refs_lists[index].set_length(0);
372 }
373 }
374 };
375
376 // Enqueue references that are not made active again
377 void ReferenceProcessor::enqueue_discovered_reflists(AbstractRefProcTaskExecutor* task_executor,
378 ReferenceProcessorPhaseTimes* phase_times) {
379
380 ReferenceProcessorStats stats(total_count(_discoveredSoftRefs),
381 total_count(_discoveredWeakRefs),
382 total_count(_discoveredFinalRefs),
383 total_count(_discoveredPhantomRefs));
384
385 RefProcEnqueueTimeTracker tt(phase_times, stats);
386
387 if (_processing_is_mt && task_executor != NULL) {
388 // Parallel code
389 RefProcEnqueueTask tsk(*this, _discovered_refs, _max_num_q, phase_times);
390 task_executor->execute(tsk);
391 } else {
392     // Serial code: process each of the discovered lists in turn.
393 for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
394 enqueue_discovered_reflist(_discovered_refs[i]);
395 _discovered_refs[i].set_head(NULL);
396 _discovered_refs[i].set_length(0);
397 }
398 }
399 }
400
401 void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
402 _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
403 oop discovered = java_lang_ref_Reference::discovered(_ref);
404 assert(_discovered_addr && discovered->is_oop_or_null(),
405 "Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered));
406 _next = discovered;
407 _referent_addr = java_lang_ref_Reference::referent_addr(_ref);
408 _referent = java_lang_ref_Reference::referent(_ref);
409 assert(Universe::heap()->is_in_reserved_or_null(_referent),
466 while (iter.has_next()) {
467 iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
468 bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive();
469 if (referent_is_dead &&
470 !policy->should_clear_reference(iter.obj(), _soft_ref_timestamp_clock)) {
471 log_develop_trace(gc, ref)("Dropping reference (" INTPTR_FORMAT ": %s" ") by policy",
472 p2i(iter.obj()), iter.obj()->klass()->internal_name());
473 // Remove Reference object from list
474 iter.remove();
475 // keep the referent around
476 iter.make_referent_alive();
477 iter.move_to_next();
478 } else {
479 iter.next();
480 }
481 }
482 // Close the reachable set
483 complete_gc->do_void();
484 log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " dead Refs out of " SIZE_FORMAT " discovered Refs by policy, from list " INTPTR_FORMAT,
485 iter.removed(), iter.processed(), p2i(&refs_list));
486 }
487
488 // Traverse the list and remove any Refs that are not active, or
489 // whose referents are either alive or NULL.
490 void
491 ReferenceProcessor::pp2_work(DiscoveredList& refs_list,
492 BoolObjectClosure* is_alive,
493 OopClosure* keep_alive) {
494 assert(discovery_is_atomic(), "Error");
495 DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
496 while (iter.has_next()) {
497 iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
498 DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
499 assert(next == NULL, "Should not discover inactive Reference");
500 if (iter.is_referent_alive()) {
501 log_develop_trace(gc, ref)("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",
502 p2i(iter.obj()), iter.obj()->klass()->internal_name());
503 // The referent is reachable after all.
504 // Remove Reference object from list.
505 iter.remove();
506 // Update the referent pointer as necessary: Note that this
595 oop next = refs_list.head();
596 while (next != obj) {
597 obj = next;
598 next = java_lang_ref_Reference::discovered(obj);
599 java_lang_ref_Reference::set_discovered_raw(obj, NULL);
600 }
601 refs_list.set_head(NULL);
602 refs_list.set_length(0);
603 }
604
605 void ReferenceProcessor::abandon_partial_discovery() {
606 // loop over the lists
607 for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
608 if ((i % _max_num_q) == 0) {
609 log_develop_trace(gc, ref)("Abandoning %s discovered list", list_name(i));
610 }
611 clear_discovered_references(_discovered_refs[i]);
612 }
613 }
614
615 size_t ReferenceProcessor::total_reference_count(ReferenceType type) const {
616 DiscoveredList* list = NULL;
617
618 switch (type) {
619 case REF_SOFT:
620 list = _discoveredSoftRefs;
621 break;
622 case REF_WEAK:
623 list = _discoveredWeakRefs;
624 break;
625 case REF_FINAL:
626 list = _discoveredFinalRefs;
627 break;
628 case REF_PHANTOM:
629 list = _discoveredPhantomRefs;
630 break;
631 case REF_OTHER:
632 case REF_NONE:
633 default:
634 ShouldNotReachHere();
635 }
636 return total_count(list);
637 }
638
639 class RefProcPhase1Task: public AbstractRefProcTaskExecutor::ProcessTask {
640 public:
641 RefProcPhase1Task(ReferenceProcessor& ref_processor,
642 DiscoveredList refs_lists[],
643 ReferencePolicy* policy,
644 bool marks_oops_alive,
645 ReferenceProcessorPhaseTimes* phase_times)
646 : ProcessTask(ref_processor, refs_lists, marks_oops_alive, phase_times),
647 _policy(policy)
648 { }
649 virtual void work(unsigned int i, BoolObjectClosure& is_alive,
650 OopClosure& keep_alive,
651 VoidClosure& complete_gc)
652 {
653 RefProcWorkerTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase1, _phase_times, i);
654
655 _ref_processor.process_phase1(_refs_lists[i], _policy,
656 &is_alive, &keep_alive, &complete_gc);
657 }
658 private:
659 ReferencePolicy* _policy;
660 };
661
662 class RefProcPhase2Task: public AbstractRefProcTaskExecutor::ProcessTask {
663 public:
664 RefProcPhase2Task(ReferenceProcessor& ref_processor,
665 DiscoveredList refs_lists[],
666 bool marks_oops_alive,
667 ReferenceProcessorPhaseTimes* phase_times)
668 : ProcessTask(ref_processor, refs_lists, marks_oops_alive, phase_times)
669 { }
670 virtual void work(unsigned int i, BoolObjectClosure& is_alive,
671 OopClosure& keep_alive,
672 VoidClosure& complete_gc)
673 {
674 RefProcWorkerTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase2, _phase_times, i);
675
676 _ref_processor.process_phase2(_refs_lists[i],
677 &is_alive, &keep_alive, &complete_gc);
678 }
679 };
680
681 class RefProcPhase3Task: public AbstractRefProcTaskExecutor::ProcessTask {
682 public:
683 RefProcPhase3Task(ReferenceProcessor& ref_processor,
684 DiscoveredList refs_lists[],
685 bool clear_referent,
686 bool marks_oops_alive,
687 ReferenceProcessorPhaseTimes* phase_times)
688 : ProcessTask(ref_processor, refs_lists, marks_oops_alive, phase_times),
689 _clear_referent(clear_referent)
690 { }
691 virtual void work(unsigned int i, BoolObjectClosure& is_alive,
692 OopClosure& keep_alive,
693 VoidClosure& complete_gc)
694 {
695 RefProcWorkerTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase3, _phase_times, i);
696
697 _ref_processor.process_phase3(_refs_lists[i], _clear_referent,
698 &is_alive, &keep_alive, &complete_gc);
699 }
700 private:
701 bool _clear_referent;
702 };
703
704 #ifndef PRODUCT
705 void ReferenceProcessor::log_reflist_counts(DiscoveredList ref_lists[], uint active_length, size_t total_refs) {
706 if (!log_is_enabled(Trace, gc, ref)) {
707 return;
708 }
709
710 stringStream st;
711 for (uint i = 0; i < active_length; ++i) {
712 st.print(SIZE_FORMAT " ", ref_lists[i].length());
713 }
714 log_develop_trace(gc, ref)("%s= " SIZE_FORMAT, st.as_string(), total_refs);
715 #ifdef ASSERT
716 for (uint i = active_length; i < _max_num_q; i++) {
806 }
807 log_reflist_counts(ref_lists, _num_q, balanced_total_refs);
808 assert(total_refs == balanced_total_refs, "Balancing was incomplete");
809 #endif
810 }
811
812 void ReferenceProcessor::balance_all_queues() {
813 balance_queues(_discoveredSoftRefs);
814 balance_queues(_discoveredWeakRefs);
815 balance_queues(_discoveredFinalRefs);
816 balance_queues(_discoveredPhantomRefs);
817 }
818
819 void ReferenceProcessor::process_discovered_reflist(
820 DiscoveredList refs_lists[],
821 ReferencePolicy* policy,
822 bool clear_referent,
823 BoolObjectClosure* is_alive,
824 OopClosure* keep_alive,
825 VoidClosure* complete_gc,
826 AbstractRefProcTaskExecutor* task_executor,
827 ReferenceProcessorPhaseTimes* phase_times)
828 {
829 bool mt_processing = task_executor != NULL && _processing_is_mt;
830
831 phase_times->set_processing_is_mt(mt_processing);
832
833 // If discovery used MT and a dynamic number of GC threads, then
834 // the queues must be balanced for correctness if fewer than the
835   // maximum number of queues were used. The number of queues used
836   // during discovery may differ from the number to be used
837   // for processing, so don't depend on _num_q < _max_num_q as part
838   // of the test.
839 bool must_balance = _discovery_is_mt;
840
841 if ((mt_processing && ParallelRefProcBalancingEnabled) ||
842 must_balance) {
843 RefProcBalanceQueuesTimeTracker tt(phase_times);
844 balance_queues(refs_lists);
845 }
846
847 // Phase 1 (soft refs only):
848 // . Traverse the list and remove any SoftReferences whose
849 // referents are not alive, but that should be kept alive for
850 // policy reasons. Keep alive the transitive closure of all
851 // such referents.
852 if (policy != NULL) {
853 RefProcParPhaseTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase1, phase_times);
854
855 if (mt_processing) {
856 RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/, phase_times);
857 task_executor->execute(phase1);
858 } else {
859 for (uint i = 0; i < _max_num_q; i++) {
860 process_phase1(refs_lists[i], policy,
861 is_alive, keep_alive, complete_gc);
862 }
863 }
864 } else { // policy == NULL
865 assert(refs_lists != _discoveredSoftRefs,
866 "Policy must be specified for soft references.");
867 }
868
869 // Phase 2:
870 // . Traverse the list and remove any refs whose referents are alive.
871 {
872 RefProcParPhaseTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase2, phase_times);
873
874 if (mt_processing) {
875 RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/, phase_times);
876 task_executor->execute(phase2);
877 } else {
878 for (uint i = 0; i < _max_num_q; i++) {
879 process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc);
880 }
881 }
882 }
883
884 // Phase 3:
885 // . Traverse the list and process referents as appropriate.
886 {
887 RefProcParPhaseTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase3, phase_times);
888
889 if (mt_processing) {
890 RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/, phase_times);
891 task_executor->execute(phase3);
892 } else {
893 for (uint i = 0; i < _max_num_q; i++) {
894 process_phase3(refs_lists[i], clear_referent,
895 is_alive, keep_alive, complete_gc);
896 }
897 }
898 }
899 }
900
901 inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) {
902 uint id = 0;
903 // Determine the queue index to use for this object.
904 if (_discovery_is_mt) {
905 // During a multi-threaded discovery phase,
906 // each thread saves to its "own" list.
907 Thread* thr = Thread::current();
908 id = thr->as_Worker_thread()->id();
909 } else {
910     // During single-threaded discovery, we save to each of
911     // the lists in round-robin fashion.
912 if (_processing_is_mt) {
913 id = next_id();
914 }
915 }
916   assert(id < _max_num_q, "Id is out-of-bounds (id %u and max id %u)", id, _max_num_q);
917
918 // Get the discovered queue to which we will add
1241 log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " Refs out of " SIZE_FORMAT " Refs in discovered list " INTPTR_FORMAT,
1242 iter.removed(), iter.processed(), p2i(&refs_list));
1243 }
1244 )
1245 }
1246
1247 const char* ReferenceProcessor::list_name(uint i) {
1248 assert(i <= _max_num_q * number_of_subclasses_of_ref(),
1249 "Out of bounds index");
1250
1251 int j = i / _max_num_q;
1252 switch (j) {
1253 case 0: return "SoftRef";
1254 case 1: return "WeakRef";
1255 case 2: return "FinalRef";
1256 case 3: return "PhantomRef";
1257 }
1258 ShouldNotReachHere();
1259 return NULL;
1260 }