src/share/vm/memory/referenceProcessor.cpp

rev 2652 : 7085906: Replace the permgen allocated sentinelRef with a self-looped end
Summary: Remove the sentinelRef and let the last Reference in a discovered chain point back to itself.
Reviewed-by: TBD1, TBD2

*** 33,66 ****
  #include "runtime/java.hpp"
  #include "runtime/jniHandles.hpp"
  
  ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
  ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy = NULL;
- oop ReferenceProcessor::_sentinelRef = NULL;
  const int subclasses_of_ref = REF_PHANTOM - REF_OTHER;
  
  // List of discovered references.
  class DiscoveredList {
  public:
    DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
    oop head() const {
!     return UseCompressedOops ? oopDesc::decode_heap_oop_not_null(_compressed_head) : _oop_head;
    }
    HeapWord* adr_head() {
      return UseCompressedOops ? (HeapWord*)&_compressed_head : (HeapWord*)&_oop_head;
    }
    void set_head(oop o) {
      if (UseCompressedOops) {
        // Must compress the head ptr.
!       _compressed_head = oopDesc::encode_heap_oop_not_null(o);
      } else {
        _oop_head = o;
      }
    }
!   bool empty() const { return head() == ReferenceProcessor::sentinel_ref(); }
    size_t length() { return _len; }
    void set_length(size_t len) { _len = len; }
    void inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
    void dec_length(size_t dec) { _len -= dec; }
  private:
--- 33,65 ----
  #include "runtime/java.hpp"
  #include "runtime/jniHandles.hpp"
  
  ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
  ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy = NULL;
  const int subclasses_of_ref = REF_PHANTOM - REF_OTHER;
  
  // List of discovered references.
  class DiscoveredList {
  public:
    DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
    oop head() const {
!     return UseCompressedOops ? oopDesc::decode_heap_oop(_compressed_head) : _oop_head;
    }
    HeapWord* adr_head() {
      return UseCompressedOops ? (HeapWord*)&_compressed_head : (HeapWord*)&_oop_head;
    }
    void set_head(oop o) {
      if (UseCompressedOops) {
        // Must compress the head ptr.
!       _compressed_head = oopDesc::encode_heap_oop(o);
      } else {
        _oop_head = o;
      }
    }
!   bool empty() const { return head() == NULL; }
    size_t length() { return _len; }
    void set_length(size_t len) { _len = len; }
    void inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
    void dec_length(size_t dec) { _len -= dec; }
  private:
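Note on the new list representation: this hunk replaces the permgen-allocated sentinelRef with two conventions that the rest of the patch relies on: an empty DiscoveredList has a NULL head, and the last Reference on a non-empty list points its discovered field back at itself. The following minimal, standalone sketch (not HotSpot code; Node, List and push_front are illustrative stand-ins) shows both invariants and how prepending preserves them:

#include <cassert>
#include <cstddef>

// Illustrative stand-in for a java.lang.ref.Reference with a 'discovered' link.
struct Node {
  Node* discovered;   // next element in the discovered list; a self-pointer marks the end
  Node() : discovered(NULL) {}
};

// Stand-in for DiscoveredList after this change: an empty list has a NULL head,
// and the last node of a non-empty list points to itself.
struct List {
  Node* head;
  List() : head(NULL) {}
  bool empty() const { return head == NULL; }

  void push_front(Node* n) {
    // If the list is empty, the new node becomes the self-looped tail.
    n->discovered = empty() ? n : head;
    head = n;
  }
};

int main() {
  List l;
  Node a, b;
  assert(l.empty());
  l.push_front(&a);                 // a is both head and tail: a.discovered == &a
  assert(a.discovered == &a);
  l.push_front(&b);                 // b -> a -> a (self-loop terminates the chain)
  assert(b.discovered == &a && a.discovered == &a);
  return 0;
}

The same head-or-self pattern reappears in the discovery paths further down (next_discovered = current_head != NULL ? current_head : obj).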
*** 74,98 ****
  void referenceProcessor_init() {
    ReferenceProcessor::init_statics();
  }
  
  void ReferenceProcessor::init_statics() {
-   assert(_sentinelRef == NULL, "should be initialized precisely once");
-   EXCEPTION_MARK;
-   _sentinelRef = instanceKlass::cast(
-                    SystemDictionary::Reference_klass())->
-                    allocate_permanent_instance(THREAD);
- 
    // Initialize the master soft ref clock.
    java_lang_ref_SoftReference::set_clock(os::javaTimeMillis());
-   if (HAS_PENDING_EXCEPTION) {
-     Handle ex(THREAD, PENDING_EXCEPTION);
-     vm_exit_during_initialization(ex);
-   }
-   assert(_sentinelRef != NULL && _sentinelRef->is_oop(),
-          "Just constructed it!");
  
    _always_clear_soft_ref_policy = new AlwaysClearPolicy();
    _default_soft_ref_policy = new COMPILER2_PRESENT(LRUMaxHeapPolicy()) NOT_COMPILER2(LRUCurrentHeapPolicy());
    if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) {
      vm_exit_during_initialization("Could not allocate reference policy object");
--- 73,85 ----
*** 128,141 ****
      vm_exit_during_initialization("Could not allocated RefProc Array");
    }
    _discoveredWeakRefs = &_discoveredSoftRefs[_max_num_q];
    _discoveredFinalRefs = &_discoveredWeakRefs[_max_num_q];
    _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q];
!   assert(sentinel_ref() != NULL, "_sentinelRef is NULL");
!   // Initialized all entries to _sentinelRef
    for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
!     _discoveredSoftRefs[i].set_head(sentinel_ref());
      _discoveredSoftRefs[i].set_length(0);
    }
    // If we do barreirs, cache a copy of the barrier set.
    if (discovered_list_needs_barrier) {
      _bs = Universe::heap()->barrier_set();
--- 115,127 ----
      vm_exit_during_initialization("Could not allocated RefProc Array");
    }
    _discoveredWeakRefs = &_discoveredSoftRefs[_max_num_q];
    _discoveredFinalRefs = &_discoveredWeakRefs[_max_num_q];
    _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q];
!   // Initialized all entries to NULL
    for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
!     _discoveredSoftRefs[i].set_head(NULL);
      _discoveredSoftRefs[i].set_length(0);
    }
    // If we do barreirs, cache a copy of the barrier set.
    if (discovered_list_needs_barrier) {
      _bs = Universe::heap()->barrier_set();
*** 165,178 ****
        f->do_oop((oop*)_discoveredSoftRefs[i].adr_head());
      }
    }
  }
  
- void ReferenceProcessor::oops_do(OopClosure* f) {
-   f->do_oop(adr_sentinel_ref());
- }
- 
  void ReferenceProcessor::update_soft_ref_master_clock() {
    // Update (advance) the soft ref master clock field. This must be done
    // after processing the soft ref list.
    jlong now = os::javaTimeMillis();
    jlong clock = java_lang_ref_SoftReference::clock();
--- 151,160 ----
*** 281,292 ****
      unsigned int count = count_jni_refs();
      gclog_or_tty->print(", %u refs", count);
    }
  #endif
    JNIHandles::weak_oops_do(is_alive, keep_alive);
-   // Finally remember to keep sentinel around
-   keep_alive->do_oop(adr_sentinel_ref());
    complete_gc->do_void();
  }
  
  template <class T>
--- 263,272 ----
*** 332,356 ****
    // to the pending list.
    if (TraceReferenceGC && PrintGCDetails) {
      gclog_or_tty->print_cr("ReferenceProcessor::enqueue_discovered_reflist list " INTPTR_FORMAT, (address)refs_list.head());
    }
!   oop obj = refs_list.head();
    // Walk down the list, copying the discovered field into
!   // the next field and clearing it (except for the last
!   // non-sentinel object which is treated specially to avoid
!   // confusion with an active reference).
!   while (obj != sentinel_ref()) {
      assert(obj->is_instanceRef(), "should be reference object");
!     oop next = java_lang_ref_Reference::discovered(obj);
      if (TraceReferenceGC && PrintGCDetails) {
        gclog_or_tty->print_cr(" obj " INTPTR_FORMAT "/next " INTPTR_FORMAT, obj, next);
      }
      assert(java_lang_ref_Reference::next(obj) == NULL, "The reference should not be enqueued");
!     if (next == sentinel_ref()) {  // obj is last
        // Swap refs_list into pendling_list_addr and
        // set obj's next to what we read from pending_list_addr.
        oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
        // Need oop_check on pending_list_addr above;
        // see special oop-check code at the end of
--- 312,337 ----
    // to the pending list.
    if (TraceReferenceGC && PrintGCDetails) {
      gclog_or_tty->print_cr("ReferenceProcessor::enqueue_discovered_reflist list " INTPTR_FORMAT, (address)refs_list.head());
    }
!
!   oop obj = NULL;
!   oop next = refs_list.head();
    // Walk down the list, copying the discovered field into
!   // the next field and clearing it.
!   while (obj != next) {
!     obj = next;
      assert(obj->is_instanceRef(), "should be reference object");
!     next = java_lang_ref_Reference::discovered(obj);
      if (TraceReferenceGC && PrintGCDetails) {
        gclog_or_tty->print_cr(" obj " INTPTR_FORMAT "/next " INTPTR_FORMAT, obj, next);
      }
      assert(java_lang_ref_Reference::next(obj) == NULL, "The reference should not be enqueued");
!     if (next == obj) {  // obj is last
        // Swap refs_list into pendling_list_addr and
        // set obj's next to what we read from pending_list_addr.
        oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
        // Need oop_check on pending_list_addr above;
        // see special oop-check code at the end of
--- 345,366 ----
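The enqueue walk above is the canonical traversal over the new representation: keep an (obj, next) pair and stop when the discovered field points back at the current object, so the last element is still processed exactly once and an empty list (NULL head) terminates immediately. A standalone sketch of that loop shape (illustrative names, not HotSpot code):

#include <cassert>
#include <cstddef>

struct Node {           // hypothetical stand-in for a discovered Reference
  Node* discovered;
};

// Walk a self-loop-terminated chain the way the new enqueue loop does:
// advance (obj, next) until the discovered field points back at obj.
int count(Node* head) {
  int n = 0;
  Node* obj  = NULL;
  Node* next = head;
  while (obj != next) {   // a self-loop (next == obj) ends the walk
    obj  = next;
    next = obj->discovered;
    ++n;                  // the last element is still visited exactly once
  }
  return n;               // a NULL head never enters the loop: empty list
}

int main() {
  Node a, b, c;
  a.discovered = &b; b.discovered = &c; c.discovered = &c;  // c terminates the chain
  assert(count(&a) == 3);
  assert(count(NULL) == 0);
  return 0;
}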
*** 364,387 ****
        }
      } else {
        java_lang_ref_Reference::set_next(obj, next);
      }
      java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
-     obj = next;
    }
  }
  
  // Parallel enqueue task
  class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask {
  public:
    RefProcEnqueueTask(ReferenceProcessor& ref_processor,
                       DiscoveredList discovered_refs[],
                       HeapWord* pending_list_addr,
-                      oop sentinel_ref,
                       int n_queues)
      : EnqueueTask(ref_processor, discovered_refs,
!                   pending_list_addr, sentinel_ref, n_queues)
    { }
  
    virtual void work(unsigned int work_id) {
      assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds");
      // Simplest first cut: static partitioning.
--- 345,366 ----
        }
      } else {
        java_lang_ref_Reference::set_next(obj, next);
      }
      java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
    }
  }
  
  // Parallel enqueue task
  class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask {
  public:
    RefProcEnqueueTask(ReferenceProcessor& ref_processor,
                       DiscoveredList discovered_refs[],
                       HeapWord* pending_list_addr,
                       int n_queues)
      : EnqueueTask(ref_processor, discovered_refs,
!                   pending_list_addr, n_queues)
    { }
  
    virtual void work(unsigned int work_id) {
      assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds");
      // Simplest first cut: static partitioning.
*** 394,404 ****
      for (int j = 0; j < subclasses_of_ref; j++, index += _n_queues) {
        _ref_processor.enqueue_discovered_reflist(_refs_lists[index], _pending_list_addr);
!       _refs_lists[index].set_head(_sentinel_ref);
        _refs_lists[index].set_length(0);
      }
    }
  };
--- 373,383 ----
      for (int j = 0; j < subclasses_of_ref; j++, index += _n_queues) {
        _ref_processor.enqueue_discovered_reflist(_refs_lists[index], _pending_list_addr);
!       _refs_lists[index].set_head(NULL);
        _refs_lists[index].set_length(0);
      }
    }
  };
*** 406,422 ****
  void ReferenceProcessor::enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor) {
    if (_processing_is_mt && task_executor != NULL) {
      // Parallel code
      RefProcEnqueueTask tsk(*this, _discoveredSoftRefs,
!                            pending_list_addr, sentinel_ref(), _max_num_q);
      task_executor->execute(tsk);
    } else {
      // Serial code: call the parent class's implementation
      for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
        enqueue_discovered_reflist(_discoveredSoftRefs[i], pending_list_addr);
!       _discoveredSoftRefs[i].set_head(sentinel_ref());
        _discoveredSoftRefs[i].set_length(0);
      }
    }
  }
--- 385,401 ----
  void ReferenceProcessor::enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor) {
    if (_processing_is_mt && task_executor != NULL) {
      // Parallel code
      RefProcEnqueueTask tsk(*this, _discoveredSoftRefs,
!                            pending_list_addr, _max_num_q);
      task_executor->execute(tsk);
    } else {
      // Serial code: call the parent class's implementation
      for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
        enqueue_discovered_reflist(_discoveredSoftRefs[i], pending_list_addr);
!       _discoveredSoftRefs[i].set_head(NULL);
        _discoveredSoftRefs[i].set_length(0);
      }
    }
  }
*** 426,436 ****
    inline DiscoveredListIterator(DiscoveredList& refs_list, OopClosure* keep_alive, BoolObjectClosure* is_alive);
  
    // End Of List.
!   inline bool has_next() const { return _next != ReferenceProcessor::sentinel_ref(); }
  
    // Get oop to the Reference object.
    inline oop obj() const { return _ref; }
  
    // Get oop to the referent object.
--- 405,415 ----
    inline DiscoveredListIterator(DiscoveredList& refs_list, OopClosure* keep_alive, BoolObjectClosure* is_alive);
  
    // End Of List.
!   inline bool has_next() const { return _ref != NULL; }
  
    // Get oop to the Reference object.
    inline oop obj() const { return _ref; }
  
    // Get oop to the referent object.
*** 466,480 ****
--- 445,463 ----
    // Update the discovered field.
    inline void update_discovered() {
      // First _prev_next ref actually points into DiscoveredList (gross).
      if (UseCompressedOops) {
+       if (!oopDesc::is_null(*(narrowOop*)_prev_next)) {
        _keep_alive->do_oop((narrowOop*)_prev_next);
+       }
      } else {
+       if (!oopDesc::is_null(*(oop*)_prev_next)) {
        _keep_alive->do_oop((oop*)_prev_next);
      }
    }
+   }
  
    // NULL out referent pointer.
    inline void clear_referent() { oop_store_raw(_referent_addr, NULL); }
  
    // Statistics
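The added is_null checks exist because the slot _prev_next points at (possibly the DiscoveredList head itself) may now legitimately contain NULL when the list is empty, whereas the old sentinel was always a valid oop; the keep-alive closure is therefore only applied to non-NULL slots. A small standalone sketch of the guard (the closure type and names are illustrative, not HotSpot's OopClosure):

#include <cassert>
#include <cstddef>

struct Node { Node* discovered; };

// Stand-in for a closure that may relocate the object a slot points to.
struct Closure {
  virtual void do_slot(Node** slot) = 0;
  virtual ~Closure() {}
};

// Apply the closure to the head slot only when the list is non-empty:
// a NULL head is now a legal value and is simply skipped.
void update_head_slot(Node** head_slot, Closure* keep_alive) {
  if (*head_slot != NULL) {
    keep_alive->do_slot(head_slot);
  }
}

struct CountingClosure : Closure {
  int applied;
  CountingClosure() : applied(0) {}
  virtual void do_slot(Node**) { ++applied; }
};

int main() {
  Node a;
  Node* empty_head = NULL;
  Node* head = &a;
  CountingClosure c;
  update_head_slot(&empty_head, &c);   // empty list: closure is skipped
  update_head_slot(&head, &c);         // non-empty list: closure runs once
  assert(c.applied == 1);
  return 0;
}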
*** 486,495 ****
--- 469,479 ----
    inline void move_to_next();
  
  private:
    DiscoveredList& _refs_list;
    HeapWord* _prev_next;
+   oop _prev;
    oop _ref;
    HeapWord* _discovered_addr;
    oop _next;
    HeapWord* _referent_addr;
    oop _referent;
*** 507,525 ****
  inline DiscoveredListIterator::DiscoveredListIterator(DiscoveredList& refs_list, OopClosure* keep_alive, BoolObjectClosure* is_alive)
    : _refs_list(refs_list),
      _prev_next(refs_list.adr_head()),
      _ref(refs_list.head()),
  #ifdef ASSERT
      _first_seen(refs_list.head()),
  #endif
  #ifndef PRODUCT
      _processed(0),
      _removed(0),
  #endif
!     _next(refs_list.head()),
      _keep_alive(keep_alive),
      _is_alive(is_alive)
  { }
  
  inline bool DiscoveredListIterator::is_referent_alive() const {
--- 491,510 ----
  inline DiscoveredListIterator::DiscoveredListIterator(DiscoveredList& refs_list, OopClosure* keep_alive, BoolObjectClosure* is_alive)
    : _refs_list(refs_list),
      _prev_next(refs_list.adr_head()),
+     _prev(NULL),
      _ref(refs_list.head()),
  #ifdef ASSERT
      _first_seen(refs_list.head()),
  #endif
  #ifndef PRODUCT
      _processed(0),
      _removed(0),
  #endif
!     _next(NULL),
      _keep_alive(keep_alive),
      _is_alive(is_alive)
  { }
  
  inline bool DiscoveredListIterator::is_referent_alive() const {
*** 542,571 ****
           "bad referent");
  }
  
  inline void DiscoveredListIterator::next() {
    _prev_next = _discovered_addr;
    move_to_next();
  }
  
  inline void DiscoveredListIterator::remove() {
    assert(_ref->is_oop(), "Dropping a bad reference");
    oop_store_raw(_discovered_addr, NULL);
    // First _prev_next ref actually points into DiscoveredList (gross).
    if (UseCompressedOops) {
      // Remove Reference object from list.
!     oopDesc::encode_store_heap_oop_not_null((narrowOop*)_prev_next, _next);
    } else {
      // Remove Reference object from list.
!     oopDesc::store_heap_oop((oop*)_prev_next, _next);
    }
    NOT_PRODUCT(_removed++);
    _refs_list.dec_length(1);
  }
  
  inline void DiscoveredListIterator::move_to_next() {
    _ref = _next;
    assert(_ref != _first_seen, "cyclic ref_list found");
    NOT_PRODUCT(_processed++);
  }
  
  // NOTE: process_phase*() are largely similar, and at a high level
--- 527,573 ----
           "bad referent");
  }
  
  inline void DiscoveredListIterator::next() {
    _prev_next = _discovered_addr;
+   _prev = _ref;
    move_to_next();
  }
  
  inline void DiscoveredListIterator::remove() {
    assert(_ref->is_oop(), "Dropping a bad reference");
    oop_store_raw(_discovered_addr, NULL);
+
    // First _prev_next ref actually points into DiscoveredList (gross).
+   oop new_next;
+   if (_next == _ref) {
+     // At the end of the list, we should make _prev point to itself.
+     // If _ref is the first ref, then _prev_next will be in the DiscoveredList,
+     // and _prev will be NULL.
+     new_next = _prev;
+   } else {
+     new_next = _next;
+   }
+
    if (UseCompressedOops) {
      // Remove Reference object from list.
!     oopDesc::encode_store_heap_oop((narrowOop*)_prev_next, new_next);
    } else {
      // Remove Reference object from list.
!     oopDesc::store_heap_oop((oop*)_prev_next, new_next);
    }
    NOT_PRODUCT(_removed++);
    _refs_list.dec_length(1);
  }
  
  inline void DiscoveredListIterator::move_to_next() {
+   if (_ref == _next) {
+     // End of the list.
+     _ref = NULL;
+   } else {
    _ref = _next;
+   }
    assert(_ref != _first_seen, "cyclic ref_list found");
    NOT_PRODUCT(_processed++);
  }
  
  // NOTE: process_phase*() are largely similar, and at a high level
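remove() is where the self-looped tail needs the most care: when the element being removed is the last one, its _next equals itself, so the predecessor must become the new self-looped tail (or the head slot must become NULL if the list is now empty). That is why the iterator now tracks _prev, and why move_to_next() turns the self-loop into _ref == NULL so has_next() can simply test for NULL. A standalone model of the unlink step (Node, List and unlink are illustrative, not the HotSpot iterator):

#include <cassert>
#include <cstddef>

struct Node { Node* discovered; };

struct List {
  Node* head;
  List() : head(NULL) {}
};

// Unlink 'victim' from a self-loop-terminated chain.
// 'prev' is the predecessor, or NULL when 'victim' is the first element.
void unlink(List& l, Node* prev, Node* victim) {
  Node* next = victim->discovered;
  Node* new_next;
  if (next == victim) {
    // victim was the last element: the predecessor becomes the new self-looped
    // tail; if there is no predecessor the list becomes empty (NULL head).
    new_next = prev;
  } else {
    new_next = next;
  }
  if (prev == NULL) {
    l.head = new_next;            // may be NULL: the list is now empty
  } else {
    prev->discovered = new_next;  // may be 'prev' itself: prev is now the tail
  }
  victim->discovered = NULL;      // detach the removed element
}

int main() {
  Node a, b;
  List l;
  l.head = &a; a.discovered = &b; b.discovered = &b;   // a -> b -> b
  unlink(l, &a, &b);                                   // remove the tail
  assert(l.head == &a && a.discovered == &a);          // a is now self-looped
  unlink(l, NULL, &a);                                 // remove the only element
  assert(l.head == NULL);                              // list is empty again
  return 0;
}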
*** 723,750 ****
                           iter.obj(), iter.obj()->blueprint()->internal_name());
      }
      assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference");
      iter.next();
    }
!   // Remember to keep sentinel pointer around
    iter.update_discovered();
    // Close the reachable set
    complete_gc->do_void();
  }
  
  void
! ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& refs_list) {
!   oop obj = refs_list.head();
!   while (obj != sentinel_ref()) {
!     oop discovered = java_lang_ref_Reference::discovered(obj);
      java_lang_ref_Reference::set_discovered_raw(obj, NULL);
-     obj = discovered;
    }
!   refs_list.set_head(sentinel_ref());
    refs_list.set_length(0);
  }
  
  void ReferenceProcessor::abandon_partial_discovery() {
    // loop over the lists
    for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
      if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
        gclog_or_tty->print_cr("\nAbandoning %s discovered list",
--- 725,758 ----
                           iter.obj(), iter.obj()->blueprint()->internal_name());
      }
      assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference");
      iter.next();
    }
!   // Remember to update the next pointer of the last ref.
    iter.update_discovered();
    // Close the reachable set
    complete_gc->do_void();
  }
  
  void
! ReferenceProcessor::clear_discovered_references(DiscoveredList& refs_list) {
!   oop obj = NULL;
!   oop next = refs_list.head();
!   while (next != obj) {
!     obj = next;
!     next = java_lang_ref_Reference::discovered(obj);
      java_lang_ref_Reference::set_discovered_raw(obj, NULL);
    }
!   refs_list.set_head(NULL);
    refs_list.set_length(0);
  }
  
+ void
+ ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& refs_list) {
+   clear_discovered_references(refs_list);
+ }
+
  void ReferenceProcessor::abandon_partial_discovery() {
    // loop over the lists
    for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
      if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
        gclog_or_tty->print_cr("\nAbandoning %s discovered list",
*** 857,878 ****
--- 865,903 ----
                              avg_refs - ref_lists[to_idx].length());
        } else {
          refs_to_move = MIN2(ref_lists[from_idx].length() - avg_refs,
                              avg_refs - ref_lists[to_idx].length());
        }
+
+       assert(refs_to_move > 0, "otherwise the code below will fail");
+
        oop move_head = ref_lists[from_idx].head();
        oop move_tail = move_head;
        oop new_head = move_head;
        // find an element to split the list on
        for (size_t j = 0; j < refs_to_move; ++j) {
          move_tail = new_head;
          new_head = java_lang_ref_Reference::discovered(new_head);
        }
+
+       // Add the chain to the to list.
+       if (ref_lists[to_idx].head() == NULL) {
+         // to list is empty. Make a loop at the end.
+         java_lang_ref_Reference::set_discovered(move_tail, move_tail);
+       } else {
          java_lang_ref_Reference::set_discovered(move_tail, ref_lists[to_idx].head());
+       }
        ref_lists[to_idx].set_head(move_head);
        ref_lists[to_idx].inc_length(refs_to_move);
+
+       // Remove the chain from the from list.
+       if (move_tail == new_head) {
+         // We found the end of the from list.
+         ref_lists[from_idx].set_head(NULL);
+       } else {
          ref_lists[from_idx].set_head(new_head);
+       }
        ref_lists[from_idx].dec_length(refs_to_move);
        if (ref_lists[from_idx].length() == 0) {
          break;
        }
      } else {
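When balance_queues() moves a prefix of one discovered list onto another, the splice now has two extra cases: if the destination list is empty the moved tail must be made to point at itself, and if the moved prefix consumed the whole source list the source head must become NULL (detected by move_tail == new_head). A standalone sketch of that splice under the same invariants (illustrative names, not HotSpot code):

#include <cassert>
#include <cstddef>

struct Node { Node* discovered; };
struct List { Node* head; List() : head(NULL) {} };

// Move the first 'count' nodes of 'from' onto the front of 'to'.
// Both lists use a NULL head when empty and a self-looped last node otherwise.
void splice_front(List& from, List& to, int count) {
  assert(count > 0 && from.head != NULL);
  Node* move_head = from.head;
  Node* move_tail = move_head;
  Node* new_head  = move_head;
  for (int i = 0; i < count; ++i) {           // find the split point
    move_tail = new_head;
    new_head  = new_head->discovered;
  }
  // Attach the moved chain to 'to'.
  if (to.head == NULL) {
    move_tail->discovered = move_tail;        // moved tail terminates the chain
  } else {
    move_tail->discovered = to.head;
  }
  to.head = move_head;
  // Detach the moved chain from 'from'.
  if (move_tail == new_head) {
    from.head = NULL;                         // the whole source list was moved
  } else {
    from.head = new_head;
  }
}

int main() {
  Node a, b, c;
  List from, to;
  from.head = &a; a.discovered = &b; b.discovered = &c; c.discovered = &c;  // a->b->c
  splice_front(from, to, 2);                   // move a and b
  assert(to.head == &a && b.discovered == &b); // b became the self-looped tail of 'to'
  assert(from.head == &c);                     // c remains in 'from'
  splice_front(from, to, 1);                   // move the last remaining node
  assert(from.head == NULL && to.head == &c && c.discovered == &a);
  return 0;
}

Worth noting: the added assert(refs_to_move > 0, ...) documents that the split-point loop must advance at least once for move_tail and new_head to be meaningful.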
*** 1080,1114 ****
                                                HeapWord* discovered_addr) {
    assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
    // First we must make sure this object is only enqueued once. CAS in a non null
    // discovered_addr.
    oop current_head = refs_list.head();
  
    // Note: In the case of G1, this specific pre-barrier is strictly
    // not necessary because the only case we are interested in
    // here is when *discovered_addr is NULL (see the CAS further below),
    // so this will expand to nothing. As a result, we have manually
    // elided this out for G1, but left in the test for some future
    // collector that might have need for a pre-barrier here.
    if (_discovered_list_needs_barrier && !UseG1GC) {
      if (UseCompressedOops) {
!       _bs->write_ref_field_pre((narrowOop*)discovered_addr, current_head);
      } else {
!       _bs->write_ref_field_pre((oop*)discovered_addr, current_head);
      }
      guarantee(false, "Need to check non-G1 collector");
    }
  
!   oop retest = oopDesc::atomic_compare_exchange_oop(current_head, discovered_addr, NULL);
    if (retest == NULL) {
      // This thread just won the right to enqueue the object.
      // We have separate lists for enqueueing so no synchronization
      // is necessary.
      refs_list.set_head(obj);
      refs_list.inc_length(1);
      if (_discovered_list_needs_barrier) {
!       _bs->write_ref_field((void*)discovered_addr, current_head);
      }
  
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Enqueued reference (mt) (" INTPTR_FORMAT ": %s)", obj, obj->blueprint()->internal_name());
--- 1105,1141 ----
                                                HeapWord* discovered_addr) {
    assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
    // First we must make sure this object is only enqueued once. CAS in a non null
    // discovered_addr.
    oop current_head = refs_list.head();
+   // The last ref must have its discovered field pointing to itself.
+   oop next_discovered = (current_head != NULL) ? current_head : obj;
  
    // Note: In the case of G1, this specific pre-barrier is strictly
    // not necessary because the only case we are interested in
    // here is when *discovered_addr is NULL (see the CAS further below),
    // so this will expand to nothing. As a result, we have manually
    // elided this out for G1, but left in the test for some future
    // collector that might have need for a pre-barrier here.
    if (_discovered_list_needs_barrier && !UseG1GC) {
      if (UseCompressedOops) {
!       _bs->write_ref_field_pre((narrowOop*)discovered_addr, next_discovered);
      } else {
!       _bs->write_ref_field_pre((oop*)discovered_addr, next_discovered);
      }
      guarantee(false, "Need to check non-G1 collector");
    }
  
!   oop retest = oopDesc::atomic_compare_exchange_oop(next_discovered, discovered_addr, NULL);
    if (retest == NULL) {
      // This thread just won the right to enqueue the object.
      // We have separate lists for enqueueing so no synchronization
      // is necessary.
      refs_list.set_head(obj);
      refs_list.inc_length(1);
      if (_discovered_list_needs_barrier) {
!       _bs->write_ref_field((void*)discovered_addr, next_discovered);
      }
  
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Enqueued reference (mt) (" INTPTR_FORMAT ": %s)", obj, obj->blueprint()->internal_name());
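For MT discovery the invariant is established at insertion time: the value CAS-ed into the discovered field is the current list head, or the object itself when the list is empty, so the first element of every list is born self-looped. The sketch below models that with std::atomic rather than HotSpot's oopDesc::atomic_compare_exchange_oop and ignores barriers; it is an illustration of the idea, not the VM code (in HotSpot each worker publishes to its own list, so the head update needs no extra synchronization):

#include <atomic>
#include <cassert>
#include <cstddef>

struct Node {
  std::atomic<Node*> discovered;
  Node() : discovered(nullptr) {}
};

struct List {
  std::atomic<Node*> head;
  List() : head(nullptr) {}
};

// Try to add 'obj' to the front of 'list'; returns false if another
// thread already claimed it (its discovered field was no longer NULL).
bool try_discover(List& list, Node* obj) {
  Node* current_head = list.head.load();
  // The last ref must point at itself, so an empty list installs 'obj' itself.
  Node* next_discovered = (current_head != nullptr) ? current_head : obj;
  Node* expected = nullptr;
  if (!obj->discovered.compare_exchange_strong(expected, next_discovered)) {
    return false;              // lost the race: someone else discovered 'obj'
  }
  // Publishing the new head; per-thread lists make this uncontended in HotSpot.
  list.head.store(obj);
  return true;
}

int main() {
  List list;
  Node a, b;
  assert(try_discover(list, &a));              // a becomes the self-looped tail
  assert(a.discovered.load() == &a);
  assert(try_discover(list, &b));              // b -> a -> a
  assert(b.discovered.load() == &a);
  assert(!try_discover(list, &a));             // a is already on a list
  return 0;
}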
*** 1260,1283 ****
    // If "_discovered_list_needs_barrier", we do write barriers when
    // updating the discovered reference list. Otherwise, we do a raw store
    // here: the field will be visited later when processing the discovered
    // references.
    oop current_head = list->head();
    // As in the case further above, since we are over-writing a NULL
    // pre-value, we can safely elide the pre-barrier here for the case of G1.
    assert(discovered == NULL, "control point invariant");
    if (_discovered_list_needs_barrier && !UseG1GC) {
      // safe to elide for G1
      if (UseCompressedOops) {
!       _bs->write_ref_field_pre((narrowOop*)discovered_addr, current_head);
      } else {
!       _bs->write_ref_field_pre((oop*)discovered_addr, current_head);
      }
      guarantee(false, "Need to check non-G1 collector");
    }
!   oop_store_raw(discovered_addr, current_head);
    if (_discovered_list_needs_barrier) {
!     _bs->write_ref_field((void*)discovered_addr, current_head);
    }
    list->set_head(obj);
    list->inc_length(1);
    if (TraceReferenceGC) {
--- 1287,1313 ----
    // If "_discovered_list_needs_barrier", we do write barriers when
    // updating the discovered reference list. Otherwise, we do a raw store
    // here: the field will be visited later when processing the discovered
    // references.
    oop current_head = list->head();
+   // The last ref must have its discovered field pointing to itself.
+   oop next_discovered = (current_head != NULL) ? current_head : obj;
+
    // As in the case further above, since we are over-writing a NULL
    // pre-value, we can safely elide the pre-barrier here for the case of G1.
    assert(discovered == NULL, "control point invariant");
    if (_discovered_list_needs_barrier && !UseG1GC) {
      // safe to elide for G1
      if (UseCompressedOops) {
!       _bs->write_ref_field_pre((narrowOop*)discovered_addr, next_discovered);
      } else {
!       _bs->write_ref_field_pre((oop*)discovered_addr, next_discovered);
      }
      guarantee(false, "Need to check non-G1 collector");
    }
!   oop_store_raw(discovered_addr, next_discovered);
    if (_discovered_list_needs_barrier) {
!     _bs->write_ref_field((void*)discovered_addr, next_discovered);
    }
    list->set_head(obj);
    list->inc_length(1);
    if (TraceReferenceGC) {
*** 1435,1458 ****
  void ReferenceProcessor::verify_ok_to_handle_reflists() {
    // empty for now
  }
  #endif
  
- void ReferenceProcessor::verify() {
-   guarantee(sentinel_ref() != NULL && sentinel_ref()->is_oop(), "Lost _sentinelRef");
- }
- 
  #ifndef PRODUCT
  void ReferenceProcessor::clear_discovered_references() {
    guarantee(!_discovering_refs, "Discovering refs?");
    for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
!     oop obj = _discoveredSoftRefs[i].head();
!     while (obj != sentinel_ref()) {
!       oop next = java_lang_ref_Reference::discovered(obj);
!       java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
!       obj = next;
!     }
!     _discoveredSoftRefs[i].set_head(sentinel_ref());
!     _discoveredSoftRefs[i].set_length(0);
    }
  }
  #endif // PRODUCT
--- 1465,1478 ----
  void ReferenceProcessor::verify_ok_to_handle_reflists() {
    // empty for now
  }
  #endif
  
  #ifndef PRODUCT
  void ReferenceProcessor::clear_discovered_references() {
    guarantee(!_discovering_refs, "Discovering refs?");
    for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
!     clear_discovered_references(_discoveredSoftRefs[i]);
    }
  }
+
  #endif // PRODUCT