 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/jniHandles.hpp"

ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy      = NULL;
oop              ReferenceProcessor::_sentinelRef = NULL;
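
// The number of concrete java.lang.ref.Reference subclasses tracked here
// (Soft, Weak, Final and Phantom); REF_OTHER and REF_PHANTOM bracket them
// in the referenceType enum. The discovered lists are allocated as one
// flat array of _max_num_q * subclasses_of_ref entries, one block of
// _max_num_q queues per subclass (see the constructor below).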
const int subclasses_of_ref = REF_PHANTOM - REF_OTHER;

// List of discovered references.
class DiscoveredList {
public:
  DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
  oop head() const {
    return UseCompressedOops ? oopDesc::decode_heap_oop_not_null(_compressed_head) :
                               _oop_head;
  }
  HeapWord* adr_head() {
    return UseCompressedOops ? (HeapWord*)&_compressed_head :
                               (HeapWord*)&_oop_head;
  }
  void set_head(oop o) {
    if (UseCompressedOops) {
      // Must compress the head ptr.
      _compressed_head = oopDesc::encode_heap_oop_not_null(o);
    } else {
      _oop_head = o;
    }
  }
  bool empty() const { return head() == ReferenceProcessor::sentinel_ref(); }
  size_t length() { return _len; }
  void set_length(size_t len) { _len = len; }
  void inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
  void dec_length(size_t dec) { _len -= dec; }
private:
  // Set value depending on UseCompressedOops. This could be a template class
  // but then we have to fix all the instantiations and declarations that use this class.
  oop       _oop_head;
  narrowOop _compressed_head;
  size_t    _len;
};
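
// Note: only one of _oop_head / _compressed_head is in use in any given VM
// run (selected by UseCompressedOops); adr_head() exposes whichever field is
// active so callers can treat the list head as an ordinary root slot.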

void referenceProcessor_init() {
  ReferenceProcessor::init_statics();
}

void ReferenceProcessor::init_statics() {
  assert(_sentinelRef == NULL, "should be initialized precisely once");
  EXCEPTION_MARK;
  _sentinelRef = instanceKlass::cast(
                   SystemDictionary::Reference_klass())->
                     allocate_permanent_instance(THREAD);

  // Initialize the master soft ref clock.
  java_lang_ref_SoftReference::set_clock(os::javaTimeMillis());

  if (HAS_PENDING_EXCEPTION) {
    Handle ex(THREAD, PENDING_EXCEPTION);
    vm_exit_during_initialization(ex);
  }
  assert(_sentinelRef != NULL && _sentinelRef->is_oop(),
                                       bool               mt_processing,
                                       int                mt_processing_degree,
                                       bool               mt_discovery,
                                       int                mt_discovery_degree,
                                       bool               atomic_discovery,
                                       BoolObjectClosure* is_alive_non_header,
                                       bool               discovered_list_needs_barrier) :
  _discovering_refs(false),
  _enqueuing_is_done(false),
  _is_alive_non_header(is_alive_non_header),
  _discovered_list_needs_barrier(discovered_list_needs_barrier),
  _bs(NULL),
  _processing_is_mt(mt_processing),
  _next_id(0)
{
  _span = span;
  _discovery_is_atomic = atomic_discovery;
  _discovery_is_mt     = mt_discovery;
  _num_q               = MAX2(1, mt_processing_degree);
  _max_num_q           = MAX2(_num_q, mt_discovery_degree);
  _discoveredSoftRefs = NEW_C_HEAP_ARRAY(DiscoveredList, _max_num_q * subclasses_of_ref);
  if (_discoveredSoftRefs == NULL) {
    vm_exit_during_initialization("Could not allocate RefProc Array");
  }
  _discoveredWeakRefs    = &_discoveredSoftRefs[_max_num_q];
  _discoveredFinalRefs   = &_discoveredWeakRefs[_max_num_q];
  _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q];
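  // Layout of the flat array, one block of _max_num_q lists per subclass:
  //   [ Soft: 0 .. _max_num_q-1 | Weak | Final | Phantom ]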
  assert(sentinel_ref() != NULL, "_sentinelRef is NULL");
  // Initialize all entries to _sentinelRef
  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
    _discoveredSoftRefs[i].set_head(sentinel_ref());
    _discoveredSoftRefs[i].set_length(0);
  }
  // If we do barriers, cache a copy of the barrier set.
  if (discovered_list_needs_barrier) {
    _bs = Universe::heap()->barrier_set();
  }
  setup_policy(false /* default soft ref policy */);
}

#ifndef PRODUCT
void ReferenceProcessor::verify_no_references_recorded() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
    guarantee(_discoveredSoftRefs[i].empty(),
              "Found non-empty discovered list");
  }
}
#endif

void ReferenceProcessor::weak_oops_do(OopClosure* f) {
  // Should this instead be
  // for (int i = 0; i < subclasses_of_ref; i++) {
  //   for (int j = 0; j < _num_q; j++) {
  //     int index = i * _max_num_q + j;
  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
    if (UseCompressedOops) {
      f->do_oop((narrowOop*)_discoveredSoftRefs[i].adr_head());
    } else {
      f->do_oop((oop*)_discoveredSoftRefs[i].adr_head());
    }
  }
}

void ReferenceProcessor::oops_do(OopClosure* f) {
  f->do_oop(adr_sentinel_ref());
}

void ReferenceProcessor::update_soft_ref_master_clock() {
  // Update (advance) the soft ref master clock field. This must be done
  // after processing the soft ref list.
  jlong now = os::javaTimeMillis();
  jlong clock = java_lang_ref_SoftReference::clock();
  NOT_PRODUCT(
  if (now < clock) {
    warning("time warp: " INT64_FORMAT " to " INT64_FORMAT, clock, now);
public:
  RefProcEnqueueTask(ReferenceProcessor& ref_processor,
                     DiscoveredList      discovered_refs[],
                     HeapWord*           pending_list_addr,
                     oop                 sentinel_ref,
                     int                 n_queues)
    : EnqueueTask(ref_processor, discovered_refs,
                  pending_list_addr, sentinel_ref, n_queues)
  { }

  virtual void work(unsigned int work_id) {
    assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds");
    // Simplest first cut: static partitioning.
    int index = work_id;
    // The increment on "index" must correspond to the maximum number of queues
    // (n_queues) with which that ReferenceProcessor was created. That
    // is because of the "clever" way the discovered references lists were
    // allocated and are indexed into.
    assert(_n_queues == (int) _ref_processor.max_num_q(), "Different number not expected");
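    // E.g. with _n_queues == 4, worker 1 visits indices 1 (Soft), 5 (Weak),
    // 9 (Final) and 13 (Phantom) -- one discovered list per subclass.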
    for (int j = 0;
         j < subclasses_of_ref;
         j++, index += _n_queues) {
      _ref_processor.enqueue_discovered_reflist(
        _refs_lists[index], _pending_list_addr);
      _refs_lists[index].set_head(_sentinel_ref);
      _refs_lists[index].set_length(0);
    }
  }
};

// Enqueue references that are not made active again
void ReferenceProcessor::enqueue_discovered_reflists(HeapWord* pending_list_addr,
                                                     AbstractRefProcTaskExecutor* task_executor) {
  if (_processing_is_mt && task_executor != NULL) {
    // Parallel code
    RefProcEnqueueTask tsk(*this, _discoveredSoftRefs,
                           pending_list_addr, sentinel_ref(), _max_num_q);
    task_executor->execute(tsk);
  } else {
    // Serial code: enqueue each discovered list directly
    for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
      enqueue_discovered_reflist(_discoveredSoftRefs[i], pending_list_addr);
      _discoveredSoftRefs[i].set_head(sentinel_ref());
      _discoveredSoftRefs[i].set_length(0);
    }
  }
}

// Iterator for the list of discovered references.
class DiscoveredListIterator {
public:
  inline DiscoveredListIterator(DiscoveredList&    refs_list,
                                OopClosure*        keep_alive,
                                BoolObjectClosure* is_alive);

  // End Of List.
  inline bool has_next() const { return _next != ReferenceProcessor::sentinel_ref(); }

  // Get oop to the Reference object.
  inline oop obj() const { return _ref; }

  // Get oop to the referent object.
  inline oop referent() const { return _referent; }

  // Returns true if referent is alive.
  inline bool is_referent_alive() const;

  // Loads data for the current reference.
  // The "allow_null_referent" argument tells us to allow for the possibility
  // of a NULL referent in the discovered Reference object. This typically
  // happens in the case of concurrent collectors that may have done the
  // discovery concurrently, or interleaved, with mutator execution.
  inline void load_ptrs(DEBUG_ONLY(bool allow_null_referent));

  // Move to the next discovered reference.
  inline void next();

  // Remove the current reference from the list
  inline void remove();

  // Make the Reference object active again.
  inline void make_active() { java_lang_ref_Reference::set_next(_ref, NULL); }

  // Make the referent alive.
  inline void make_referent_alive() {
    if (UseCompressedOops) {
      _keep_alive->do_oop((narrowOop*)_referent_addr);
    } else {
      _keep_alive->do_oop((oop*)_referent_addr);
    }
  }

  // Update the discovered field.
  inline void update_discovered() {
    // First _prev_next ref actually points into DiscoveredList (gross).
    if (UseCompressedOops) {
      _keep_alive->do_oop((narrowOop*)_prev_next);
    } else {
      _keep_alive->do_oop((oop*)_prev_next);
    }
  }

  inline void clear_referent() { oop_store_raw(_referent_addr, NULL); }

  // Statistics
  NOT_PRODUCT(
  inline size_t processed() const { return _processed; }
  inline size_t removed() const   { return _removed; }
  )

  inline void move_to_next();

private:
  DiscoveredList&    _refs_list;
  HeapWord*          _prev_next;
  oop                _ref;
  HeapWord*          _discovered_addr;
  oop                _next;
  HeapWord*          _referent_addr;
  oop                _referent;
  OopClosure*        _keep_alive;
  BoolObjectClosure* _is_alive;
  DEBUG_ONLY(
  oop _first_seen; // cyclic linked list check
  )
  NOT_PRODUCT(
  size_t _processed;
  size_t _removed;
  )
};
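
// Typical iteration pattern (a sketch; it mirrors the process_phase*
// methods and clean_up_discovered_reflist below):
//
//   DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
//   while (iter.has_next()) {
//     iter.load_ptrs(DEBUG_ONLY(allow_null_referent));
//     ...inspect iter.obj() / iter.referent()...
//     if (dropping) {
//       iter.remove();        // unlink from the discovered list
//       iter.move_to_next();
//     } else {
//       iter.next();
//     }
//   }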

inline DiscoveredListIterator::DiscoveredListIterator(DiscoveredList&    refs_list,
                                                      OopClosure*        keep_alive,
                                                      BoolObjectClosure* is_alive)
  : _refs_list(refs_list),
    _prev_next(refs_list.adr_head()),
    _ref(refs_list.head()),
#ifdef ASSERT
    _first_seen(refs_list.head()),
#endif
#ifndef PRODUCT
    _processed(0),
    _removed(0),
#endif
    _next(refs_list.head()),
    _keep_alive(keep_alive),
    _is_alive(is_alive)
{ }

inline bool DiscoveredListIterator::is_referent_alive() const {
  return _is_alive->do_object_b(_referent);
}

inline void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
  _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
  // Remember to keep sentinel pointer around
  iter.update_discovered();
  // Close the reachable set
  complete_gc->do_void();
}

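// Walk refs_list's discovered chain, clearing each element's discovered
// field as we go, then reset the list head to the sentinel and its length
// to zero, abandoning any partially-completed discovery.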
void
ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& refs_list) {
  oop obj = refs_list.head();
  while (obj != sentinel_ref()) {
    oop discovered = java_lang_ref_Reference::discovered(obj);
    java_lang_ref_Reference::set_discovered_raw(obj, NULL);
    obj = discovered;
  }
  refs_list.set_head(sentinel_ref());
  refs_list.set_length(0);
}

void ReferenceProcessor::abandon_partial_discovery() {
  // loop over the lists
  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
    if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
      gclog_or_tty->print_cr("\nAbandoning %s discovered list",
                             list_name(i));
    }
    abandon_partial_discovered_list(_discoveredSoftRefs[i]);
  }
}

class RefProcPhase1Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase1Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    ReferencePolicy*    policy,
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
      _policy(policy)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    Thread* thr = Thread::current();
    int refs_list_index = ((WorkerThread*)thr)->id();
    assert(to_idx < _num_q, "Sanity Check!");
    if (ref_lists[to_idx].length() < avg_refs) {
      // move superfluous refs
      size_t refs_to_move;
      // Move all the Ref's if the from queue will not be processed.
      if (move_all) {
        refs_to_move = MIN2(ref_lists[from_idx].length(),
                            avg_refs - ref_lists[to_idx].length());
      } else {
        refs_to_move = MIN2(ref_lists[from_idx].length() - avg_refs,
                            avg_refs - ref_lists[to_idx].length());
      }
      oop move_head = ref_lists[from_idx].head();
      oop move_tail = move_head;
      oop new_head  = move_head;
      // find an element to split the list on
      for (size_t j = 0; j < refs_to_move; ++j) {
        move_tail = new_head;
        new_head = java_lang_ref_Reference::discovered(new_head);
      }
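      // [move_head .. move_tail] now holds exactly refs_to_move entries and
      // new_head is the first entry remaining on the from list; splice the
      // moved run onto the front of the to list.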
      java_lang_ref_Reference::set_discovered(move_tail, ref_lists[to_idx].head());
      ref_lists[to_idx].set_head(move_head);
      ref_lists[to_idx].inc_length(refs_to_move);
      ref_lists[from_idx].set_head(new_head);
      ref_lists[from_idx].dec_length(refs_to_move);
      if (ref_lists[from_idx].length() == 0) {
        break;
      }
    } else {
      to_idx = (to_idx + 1) % _num_q;
    }
  }
}
#ifdef ASSERT
  size_t balanced_total_refs = 0;
  for (int i = 0; i < _max_num_q; ++i) {
    balanced_total_refs += ref_lists[i].length();
    if (TraceReferenceGC && PrintGCDetails) {
      gclog_or_tty->print(SIZE_FORMAT " ", ref_lists[i].length());
    }
  }
      process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc);
    }
  }

  // Phase 3:
  // . Traverse the list and process referents as appropriate.
  if (mt_processing) {
    RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/);
    task_executor->execute(phase3);
  } else {
    for (int i = 0; i < _max_num_q; i++) {
      process_phase3(refs_lists[i], clear_referent,
                     is_alive, keep_alive, complete_gc);
    }
  }
}

void ReferenceProcessor::clean_up_discovered_references() {
  // loop over the lists
  // Should this instead be
  // for (int i = 0; i < subclasses_of_ref; i++) {
  //   for (int j = 0; j < _num_q; j++) {
  //     int index = i * _max_num_q + j;
  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
    if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
      gclog_or_tty->print_cr(
        "\nScrubbing %s discovered list of Null referents",
        list_name(i));
    }
    clean_up_discovered_reflist(_discoveredSoftRefs[i]);
  }
}

void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) {
  assert(!discovery_is_atomic(), "Else why call this method?");
  DiscoveredListIterator iter(refs_list, NULL, NULL);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    oop next = java_lang_ref_Reference::next(iter.obj());
    assert(next->is_oop_or_null(), "bad next field");
    // If referent has been cleared or Reference is not active,
    // drop it.
    if (iter.referent() == NULL || next != NULL) {
      debug_only(
  // reachable.
  if (is_alive_non_header() != NULL) {
    verify_referent(obj);
    if (is_alive_non_header()->do_object_b(java_lang_ref_Reference::referent(obj))) {
      return false;  // referent is reachable
    }
  }
  if (rt == REF_SOFT) {
    // For soft refs we can decide now if these are not
    // current candidates for clearing, in which case we
    // can mark through them now, rather than delaying that
    // to the reference-processing phase. Since all current
    // time-stamp policies advance the soft-ref clock only
    // at a major collection cycle, this is always currently
    // accurate.
    if (!_current_soft_ref_policy->should_clear_reference(obj)) {
      return false;
    }
  }

  HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
  const oop discovered = java_lang_ref_Reference::discovered(obj);
  assert(discovered->is_oop_or_null(), "bad discovered field");
  if (discovered != NULL) {
    // The reference has already been discovered...
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
    if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
      // assumes that an object is not processed twice;
      // if it's been already discovered it must be on another
      // generation's discovered list; so we won't discover it.
      return false;
    } else {
      assert(RefDiscoveryPolicy == ReferenceBasedDiscovery,
             "Unrecognized policy");
      // Check assumption that an object is not potentially
      // discovered twice except by concurrent collectors that potentially
      // trace the same Reference object twice.
        keep_alive->do_oop(next_addr);
      }
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();

  NOT_PRODUCT(
    if (PrintGCDetails && PrintReferenceGC && (iter.processed() > 0)) {
      gclog_or_tty->print_cr(" Dropped " SIZE_FORMAT " Refs out of " SIZE_FORMAT
        " Refs in discovered list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

const char* ReferenceProcessor::list_name(int i) {
  assert(i >= 0 && i <= _max_num_q * subclasses_of_ref, "Out of bounds index");
  int j = i / _max_num_q;
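  // E.g. with _max_num_q == 4: i in [0,3] -> SoftRef, [4,7] -> WeakRef,
  // [8,11] -> FinalRef, [12,15] -> PhantomRef.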
  switch (j) {
    case 0: return "SoftRef";
    case 1: return "WeakRef";
    case 2: return "FinalRef";
    case 3: return "PhantomRef";
  }
  ShouldNotReachHere();
  return NULL;
}

#ifndef PRODUCT
void ReferenceProcessor::verify_ok_to_handle_reflists() {
  // empty for now
}
#endif

void ReferenceProcessor::verify() {
  guarantee(sentinel_ref() != NULL && sentinel_ref()->is_oop(), "Lost _sentinelRef");
}

#ifndef PRODUCT
void ReferenceProcessor::clear_discovered_references() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
    oop obj = _discoveredSoftRefs[i].head();
    while (obj != sentinel_ref()) {
      oop next = java_lang_ref_Reference::discovered(obj);
      java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
      obj = next;
    }
    _discoveredSoftRefs[i].set_head(sentinel_ref());
    _discoveredSoftRefs[i].set_length(0);
  }
}
#endif // PRODUCT

 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/jniHandles.hpp"

ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy      = NULL;
oop              ReferenceProcessor::_sentinelRef = NULL;

bool DiscoveredList::empty() const {
  return head() == ReferenceProcessor::sentinel_ref();
}
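
// In this version DiscoveredList itself is presumably declared in
// referenceProcessor.hpp; empty() is defined out of line here so the
// declaration need not see ReferenceProcessor::sentinel_ref().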

void referenceProcessor_init() {
  ReferenceProcessor::init_statics();
}

void ReferenceProcessor::init_statics() {
  assert(_sentinelRef == NULL, "should be initialized precisely once");
  EXCEPTION_MARK;
  _sentinelRef = instanceKlass::cast(
                   SystemDictionary::Reference_klass())->
                     allocate_permanent_instance(THREAD);

  // Initialize the master soft ref clock.
  java_lang_ref_SoftReference::set_clock(os::javaTimeMillis());

  if (HAS_PENDING_EXCEPTION) {
    Handle ex(THREAD, PENDING_EXCEPTION);
    vm_exit_during_initialization(ex);
  }
  assert(_sentinelRef != NULL && _sentinelRef->is_oop(),
                                       bool               mt_processing,
                                       int                mt_processing_degree,
                                       bool               mt_discovery,
                                       int                mt_discovery_degree,
                                       bool               atomic_discovery,
                                       BoolObjectClosure* is_alive_non_header,
                                       bool               discovered_list_needs_barrier) :
  _discovering_refs(false),
  _enqueuing_is_done(false),
  _is_alive_non_header(is_alive_non_header),
  _discovered_list_needs_barrier(discovered_list_needs_barrier),
  _bs(NULL),
  _processing_is_mt(mt_processing),
  _next_id(0)
{
  _span = span;
  _discovery_is_atomic = atomic_discovery;
  _discovery_is_mt     = mt_discovery;
  _num_q               = MAX2(1, mt_processing_degree);
  _max_num_q           = MAX2(_num_q, mt_discovery_degree);
  _discoveredSoftRefs = NEW_C_HEAP_ARRAY(DiscoveredList, _max_num_q * subclasses_of_ref());
  if (_discoveredSoftRefs == NULL) {
    vm_exit_during_initialization("Could not allocate RefProc Array");
  }
  _discoveredWeakRefs    = &_discoveredSoftRefs[_max_num_q];
  _discoveredFinalRefs   = &_discoveredWeakRefs[_max_num_q];
  _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q];
  assert(sentinel_ref() != NULL, "_sentinelRef is NULL");
  // Initialize all entries to _sentinelRef
  for (int i = 0; i < _max_num_q * subclasses_of_ref(); i++) {
    _discoveredSoftRefs[i].set_head(sentinel_ref());
    _discoveredSoftRefs[i].set_length(0);
  }
  // If we do barriers, cache a copy of the barrier set.
  if (discovered_list_needs_barrier) {
    _bs = Universe::heap()->barrier_set();
  }
  setup_policy(false /* default soft ref policy */);
}

#ifndef PRODUCT
void ReferenceProcessor::verify_no_references_recorded() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (int i = 0; i < _max_num_q * subclasses_of_ref(); i++) {
    guarantee(_discoveredSoftRefs[i].empty(),
              "Found non-empty discovered list");
  }
}
#endif

void ReferenceProcessor::weak_oops_do(OopClosure* f) {
  // Should this instead be
  // for (int i = 0; i < subclasses_of_ref(); i++) {
  //   for (int j = 0; j < _num_q; j++) {
  //     int index = i * _max_num_q + j;
  for (int i = 0; i < _max_num_q * subclasses_of_ref(); i++) {
    if (UseCompressedOops) {
      f->do_oop((narrowOop*)_discoveredSoftRefs[i].adr_head());
    } else {
      f->do_oop((oop*)_discoveredSoftRefs[i].adr_head());
    }
  }
}

void ReferenceProcessor::oops_do(OopClosure* f) {
  f->do_oop(adr_sentinel_ref());
}

void ReferenceProcessor::update_soft_ref_master_clock() {
  // Update (advance) the soft ref master clock field. This must be done
  // after processing the soft ref list.
  jlong now = os::javaTimeMillis();
  jlong clock = java_lang_ref_SoftReference::clock();
  NOT_PRODUCT(
  if (now < clock) {
    warning("time warp: " INT64_FORMAT " to " INT64_FORMAT, clock, now);
public:
  RefProcEnqueueTask(ReferenceProcessor& ref_processor,
                     DiscoveredList      discovered_refs[],
                     HeapWord*           pending_list_addr,
                     oop                 sentinel_ref,
                     int                 n_queues)
    : EnqueueTask(ref_processor, discovered_refs,
                  pending_list_addr, sentinel_ref, n_queues)
  { }

  virtual void work(unsigned int work_id) {
    assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds");
    // Simplest first cut: static partitioning.
    int index = work_id;
    // The increment on "index" must correspond to the maximum number of queues
    // (n_queues) with which that ReferenceProcessor was created. That
    // is because of the "clever" way the discovered references lists were
    // allocated and are indexed into.
    assert(_n_queues == (int) _ref_processor.max_num_q(), "Different number not expected");
    for (int j = 0;
         j < ReferenceProcessor::subclasses_of_ref();
         j++, index += _n_queues) {
      _ref_processor.enqueue_discovered_reflist(
        _refs_lists[index], _pending_list_addr);
      _refs_lists[index].set_head(_sentinel_ref);
      _refs_lists[index].set_length(0);
    }
  }
};

// Enqueue references that are not made active again
void ReferenceProcessor::enqueue_discovered_reflists(HeapWord* pending_list_addr,
                                                     AbstractRefProcTaskExecutor* task_executor) {
  if (_processing_is_mt && task_executor != NULL) {
    // Parallel code
    RefProcEnqueueTask tsk(*this, _discoveredSoftRefs,
                           pending_list_addr, sentinel_ref(), _max_num_q);
    task_executor->execute(tsk);
  } else {
    // Serial code: enqueue each discovered list directly
    for (int i = 0; i < _max_num_q * subclasses_of_ref(); i++) {
      enqueue_discovered_reflist(_discoveredSoftRefs[i], pending_list_addr);
      _discoveredSoftRefs[i].set_head(sentinel_ref());
      _discoveredSoftRefs[i].set_length(0);
    }
  }
}

// Iterator for the list of discovered references.
class DiscoveredListIterator {
public:
  inline DiscoveredListIterator(DiscoveredList&    refs_list,
                                OopClosure*        keep_alive,
                                BoolObjectClosure* is_alive);

  // End Of List.
  inline bool has_next() const { return _next != ReferenceProcessor::sentinel_ref(); }

  // Get oop to the Reference object.
  inline oop obj() const { return _ref; }

  // Get oop to the referent object.
  inline oop referent() const { return _referent; }

  // Returns true if referent is alive.
  inline bool is_referent_alive() const;

  // Loads data for the current reference.
  // The "allow_null_referent" argument tells us to allow for the possibility
  // of a NULL referent in the discovered Reference object. This typically
  // happens in the case of concurrent collectors that may have done the
  // discovery concurrently, or interleaved, with mutator execution.
  inline void load_ptrs(DEBUG_ONLY(bool allow_null_referent));

  // Move to the next discovered reference.
  inline void next();

  // Remove the current reference from the list
  inline void remove();

  // Make the Reference object active again.
  inline void make_active() {
    // For G1 we don't want to use set_next - it
    // will dirty the card for the next field of
    // the reference object and will fail
    // CT verification.
    if (UseG1GC) {
      BarrierSet* bs = oopDesc::bs();
      HeapWord* next_addr = java_lang_ref_Reference::next_addr(_ref);

      if (UseCompressedOops) {
        bs->write_ref_field_pre((narrowOop*)next_addr, NULL);
      } else {
        bs->write_ref_field_pre((oop*)next_addr, NULL);
      }
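      // The pre-barrier above still records the old value of the next
      // field for G1's SATB marking; only the card-dirtying post-write
      // barrier is skipped by the raw store below.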
      java_lang_ref_Reference::set_next_raw(_ref, NULL);
    } else {
      java_lang_ref_Reference::set_next(_ref, NULL);
    }
  }

  // Make the referent alive.
  inline void make_referent_alive() {
    if (UseCompressedOops) {
      _keep_alive->do_oop((narrowOop*)_referent_addr);
    } else {
      _keep_alive->do_oop((oop*)_referent_addr);
    }
  }

  // Update the discovered field.
  inline void update_discovered() {
    // First _prev_next ref actually points into DiscoveredList (gross).
    if (UseCompressedOops) {
      _keep_alive->do_oop((narrowOop*)_prev_next);
    } else {
      _keep_alive->do_oop((oop*)_prev_next);
    }
  }

  inline void clear_referent() { oop_store_raw(_referent_addr, NULL); }

  // Statistics
  NOT_PRODUCT(
  inline size_t processed() const { return _processed; }
  inline size_t removed() const   { return _removed; }
  )

  inline void move_to_next();

private:
  DiscoveredList&    _refs_list;
  HeapWord*          _prev_next;
  oop                _ref;
  HeapWord*          _discovered_addr;
  oop                _next;
  HeapWord*          _referent_addr;
  oop                _referent;
  OopClosure*        _keep_alive;
  BoolObjectClosure* _is_alive;

  DEBUG_ONLY(
  oop _first_seen; // cyclic linked list check
  )

  NOT_PRODUCT(
  size_t _processed;
  size_t _removed;
  )
};

inline DiscoveredListIterator::DiscoveredListIterator(DiscoveredList&    refs_list,
                                                      OopClosure*        keep_alive,
                                                      BoolObjectClosure* is_alive) :
  _refs_list(refs_list),
  _prev_next(refs_list.adr_head()),
  _ref(refs_list.head()),
#ifdef ASSERT
  _first_seen(refs_list.head()),
#endif
#ifndef PRODUCT
  _processed(0),
  _removed(0),
#endif
  _next(refs_list.head()),
  _keep_alive(keep_alive),
  _is_alive(is_alive)
{ }

inline bool DiscoveredListIterator::is_referent_alive() const {
  return _is_alive->do_object_b(_referent);
}

inline void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
  _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
  // Remember to keep sentinel pointer around
  iter.update_discovered();
  // Close the reachable set
  complete_gc->do_void();
}

void
ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& refs_list) {
  oop obj = refs_list.head();
  while (obj != sentinel_ref()) {
    oop discovered = java_lang_ref_Reference::discovered(obj);
    java_lang_ref_Reference::set_discovered_raw(obj, NULL);
    obj = discovered;
  }
  refs_list.set_head(sentinel_ref());
  refs_list.set_length(0);
}

void ReferenceProcessor::abandon_partial_discovery() {
  // loop over the lists
  for (int i = 0; i < _max_num_q * subclasses_of_ref(); i++) {
    if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
      gclog_or_tty->print_cr("\nAbandoning %s discovered list", list_name(i));
    }
    abandon_partial_discovered_list(_discoveredSoftRefs[i]);
  }
}

class RefProcPhase1Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase1Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    ReferencePolicy*    policy,
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
      _policy(policy)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    Thread* thr = Thread::current();
    int refs_list_index = ((WorkerThread*)thr)->id();
    assert(to_idx < _num_q, "Sanity Check!");
    if (ref_lists[to_idx].length() < avg_refs) {
      // move superfluous refs
      size_t refs_to_move;
      // Move all the Ref's if the from queue will not be processed.
      if (move_all) {
        refs_to_move = MIN2(ref_lists[from_idx].length(),
                            avg_refs - ref_lists[to_idx].length());
      } else {
        refs_to_move = MIN2(ref_lists[from_idx].length() - avg_refs,
                            avg_refs - ref_lists[to_idx].length());
      }
      oop move_head = ref_lists[from_idx].head();
      oop move_tail = move_head;
      oop new_head  = move_head;
      // find an element to split the list on
      for (size_t j = 0; j < refs_to_move; ++j) {
        move_tail = new_head;
        new_head = java_lang_ref_Reference::discovered(new_head);
      }

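      // [move_head .. move_tail] now holds exactly refs_to_move entries and
      // new_head is the first entry remaining on the from list. Splice with
      // the write-barriered store when the discovered list requires it
      // (e.g. G1's SATB pre-barrier); otherwise a raw store is sufficient.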
      if (_discovered_list_needs_barrier) {
        java_lang_ref_Reference::set_discovered(move_tail, ref_lists[to_idx].head());
      } else {
        HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(move_tail);
        oop_store_raw(discovered_addr, ref_lists[to_idx].head());
      }

      ref_lists[to_idx].set_head(move_head);
      ref_lists[to_idx].inc_length(refs_to_move);
      ref_lists[from_idx].set_head(new_head);
      ref_lists[from_idx].dec_length(refs_to_move);
      if (ref_lists[from_idx].length() == 0) {
        break;
      }
    } else {
      to_idx = (to_idx + 1) % _num_q;
    }
  }
}
#ifdef ASSERT
  size_t balanced_total_refs = 0;
  for (int i = 0; i < _max_num_q; ++i) {
    balanced_total_refs += ref_lists[i].length();
    if (TraceReferenceGC && PrintGCDetails) {
      gclog_or_tty->print(SIZE_FORMAT " ", ref_lists[i].length());
    }
  }
      process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc);
    }
  }

  // Phase 3:
  // . Traverse the list and process referents as appropriate.
  if (mt_processing) {
    RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/);
    task_executor->execute(phase3);
  } else {
    for (int i = 0; i < _max_num_q; i++) {
      process_phase3(refs_lists[i], clear_referent,
                     is_alive, keep_alive, complete_gc);
    }
  }
}

void ReferenceProcessor::clean_up_discovered_references() {
  // loop over the lists
  // Should this instead be
  // for (int i = 0; i < subclasses_of_ref(); i++) {
  //   for (int j = 0; j < _num_q; j++) {
  //     int index = i * _max_num_q + j;
  for (int i = 0; i < _max_num_q * subclasses_of_ref(); i++) {
    if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
      gclog_or_tty->print_cr(
        "\nScrubbing %s discovered list of Null referents",
        list_name(i));
    }
    clean_up_discovered_reflist(_discoveredSoftRefs[i]);
  }
}

void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) {
  assert(!discovery_is_atomic(), "Else why call this method?");
  DiscoveredListIterator iter(refs_list, NULL, NULL);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    oop next = java_lang_ref_Reference::next(iter.obj());
    assert(next->is_oop_or_null(), "bad next field");
    // If referent has been cleared or Reference is not active,
    // drop it.
    if (iter.referent() == NULL || next != NULL) {
      debug_only(
  // reachable.
  if (is_alive_non_header() != NULL) {
    verify_referent(obj);
    if (is_alive_non_header()->do_object_b(java_lang_ref_Reference::referent(obj))) {
      return false;  // referent is reachable
    }
  }
  if (rt == REF_SOFT) {
    // For soft refs we can decide now if these are not
    // current candidates for clearing, in which case we
    // can mark through them now, rather than delaying that
    // to the reference-processing phase. Since all current
    // time-stamp policies advance the soft-ref clock only
    // at a major collection cycle, this is always currently
    // accurate.
    if (!_current_soft_ref_policy->should_clear_reference(obj)) {
      return false;
    }
  }

  ResourceMark rm;              // Needed for tracing.

  HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
  const oop discovered = java_lang_ref_Reference::discovered(obj);
  assert(discovered->is_oop_or_null(), "bad discovered field");
  if (discovered != NULL) {
    // The reference has already been discovered...
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
    if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
      // assumes that an object is not processed twice;
      // if it's been already discovered it must be on another
      // generation's discovered list; so we won't discover it.
      return false;
    } else {
      assert(RefDiscoveryPolicy == ReferenceBasedDiscovery,
             "Unrecognized policy");
      // Check assumption that an object is not potentially
      // discovered twice except by concurrent collectors that potentially
      // trace the same Reference object twice.
        keep_alive->do_oop(next_addr);
      }
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();

  NOT_PRODUCT(
    if (PrintGCDetails && PrintReferenceGC && (iter.processed() > 0)) {
      gclog_or_tty->print_cr(" Dropped " SIZE_FORMAT " Refs out of " SIZE_FORMAT
        " Refs in discovered list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

const char* ReferenceProcessor::list_name(int i) {
  assert(i >= 0 && i <= _max_num_q * subclasses_of_ref(), "Out of bounds index");
  int j = i / _max_num_q;
  switch (j) {
    case 0: return "SoftRef";
    case 1: return "WeakRef";
    case 2: return "FinalRef";
    case 3: return "PhantomRef";
  }
  ShouldNotReachHere();
  return NULL;
}

#ifndef PRODUCT
void ReferenceProcessor::verify_ok_to_handle_reflists() {
  // empty for now
}
#endif

void ReferenceProcessor::verify() {
  guarantee(sentinel_ref() != NULL && sentinel_ref()->is_oop(), "Lost _sentinelRef");
}

#ifndef PRODUCT
void ReferenceProcessor::clear_discovered_references() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (int i = 0; i < _max_num_q * subclasses_of_ref(); i++) {
    oop obj = _discoveredSoftRefs[i].head();
    while (obj != sentinel_ref()) {
      oop next = java_lang_ref_Reference::discovered(obj);
      java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
      obj = next;
    }
    _discoveredSoftRefs[i].set_head(sentinel_ref());
    _discoveredSoftRefs[i].set_length(0);
  }
}
#endif // PRODUCT