--- old/hotspot/src/share/vm/memory/referenceProcessor.cpp	2009-08-01 04:12:26.059868028 +0100
+++ new/hotspot/src/share/vm/memory/referenceProcessor.cpp	2009-08-01 04:12:25.974951709 +0100
@@ -2,7 +2,7 @@
 #pragma ident "@(#)referenceProcessor.cpp	1.57 07/08/17 12:30:18 JVM"
 #endif
 /*
- * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2008 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,35 +28,54 @@
 # include "incls/_precompiled.incl"
 # include "incls/_referenceProcessor.cpp.incl"
 
+ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
+ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy      = NULL;
+oop              ReferenceProcessor::_sentinelRef                  = NULL;
+const int        subclasses_of_ref                                 = REF_PHANTOM - REF_OTHER;
+
 // List of discovered references.
 class DiscoveredList {
 public:
-  DiscoveredList() : _head(NULL), _len(0) { }
-  oop head() const { return _head; }
-  oop* head_ptr() { return &_head; }
-  void set_head(oop o) { _head = o; }
-  bool empty() const { return _head == ReferenceProcessor::_sentinelRef; }
+  DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
+  oop head() const {
+    return UseCompressedOops ? oopDesc::decode_heap_oop_not_null(_compressed_head) :
+                               _oop_head;
+  }
+  HeapWord* adr_head() {
+    return UseCompressedOops ? (HeapWord*)&_compressed_head :
+                               (HeapWord*)&_oop_head;
+  }
+  void set_head(oop o) {
+    if (UseCompressedOops) {
+      // Must compress the head ptr.
+      _compressed_head = oopDesc::encode_heap_oop_not_null(o);
+    } else {
+      _oop_head = o;
+    }
+  }
+  bool   empty() const          { return head() == ReferenceProcessor::sentinel_ref(); }
   size_t length()               { return _len; }
-  void set_length(size_t len) { _len = len; }
+  void   set_length(size_t len) { _len = len;  }
+  void   inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
+  void   dec_length(size_t dec) { _len -= dec; }
 private:
+  // Set value depending on UseCompressedOops. This could be a template class
+  // but then we have to fix all the instantiations and declarations that use this class.
+  oop       _oop_head;
+  narrowOop _compressed_head;
   size_t _len;
-  oop _head;
 };
-
-oop ReferenceProcessor::_sentinelRef = NULL;
-
-const int subclasses_of_ref = REF_PHANTOM - REF_OTHER;
 
 void referenceProcessor_init() {
   ReferenceProcessor::init_statics();
 }
 
 void ReferenceProcessor::init_statics() {
-  assert(_sentinelRef == NULL, "should be initialized precsiely once");
+  assert(_sentinelRef == NULL, "should be initialized precisely once");
   EXCEPTION_MARK;
   _sentinelRef = instanceKlass::cast(
-                    SystemDictionary::object_klass())->
-                      allocate_permanent_instance(THREAD);
+                    SystemDictionary::reference_klass())->
+                    allocate_permanent_instance(THREAD);
 
   // Initialize the master soft ref clock.
 java_lang_ref_SoftReference::set_clock(os::javaTimeMillis());
@@ -67,20 +86,25 @@
   }
   assert(_sentinelRef != NULL && _sentinelRef->is_oop(),
          "Just constructed it!");
+  _always_clear_soft_ref_policy = new AlwaysClearPolicy();
+  _default_soft_ref_policy      = new COMPILER2_PRESENT(LRUMaxHeapPolicy())
+                                      NOT_COMPILER2(LRUCurrentHeapPolicy());
+  if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) {
+    vm_exit_during_initialization("Could not allocate reference policy object");
+  }
   guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
             RefDiscoveryPolicy == ReferentBasedDiscovery,
             "Unrecongnized RefDiscoveryPolicy");
 }
 
-ReferenceProcessor* ReferenceProcessor::create_ref_processor(
-    MemRegion          span,
-    bool               atomic_discovery,
-    bool               mt_discovery,
-    BoolObjectClosure* is_alive_non_header,
-    int                parallel_gc_threads,
-    bool               mt_processing)
-{
+ReferenceProcessor*
+ReferenceProcessor::create_ref_processor(MemRegion          span,
+                                         bool               atomic_discovery,
+                                         bool               mt_discovery,
+                                         BoolObjectClosure* is_alive_non_header,
+                                         int                parallel_gc_threads,
+                                         bool               mt_processing,
+                                         bool               dl_needs_barrier) {
   int mt_degree = 1;
   if (parallel_gc_threads > 1) {
     mt_degree = parallel_gc_threads;
@@ -88,21 +112,27 @@
   ReferenceProcessor* rp =
     new ReferenceProcessor(span, atomic_discovery,
                            mt_discovery, mt_degree,
-                           mt_processing);
+                           mt_processing && (parallel_gc_threads > 0),
+                           dl_needs_barrier);
   if (rp == NULL) {
     vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
   }
   rp->set_is_alive_non_header(is_alive_non_header);
+  rp->setup_policy(false /* default soft ref policy */);
   return rp;
 }
-
 ReferenceProcessor::ReferenceProcessor(MemRegion span,
-  bool atomic_discovery, bool mt_discovery, int mt_degree,
-  bool mt_processing) :
+                                       bool      atomic_discovery,
+                                       bool      mt_discovery,
+                                       int       mt_degree,
+                                       bool      mt_processing,
+                                       bool      discovered_list_needs_barrier)  :
   _discovering_refs(false),
   _enqueuing_is_done(false),
   _is_alive_non_header(NULL),
+  _discovered_list_needs_barrier(discovered_list_needs_barrier),
+  _bs(NULL),
   _processing_is_mt(mt_processing),
   _next_id(0)
 {
@@ -117,12 +147,16 @@
   _discoveredWeakRefs    = &_discoveredSoftRefs[_num_q];
   _discoveredFinalRefs   = &_discoveredWeakRefs[_num_q];
   _discoveredPhantomRefs = &_discoveredFinalRefs[_num_q];
-  assert(_sentinelRef != NULL, "_sentinelRef is NULL");
+  assert(sentinel_ref() != NULL, "_sentinelRef is NULL");
   // Initialized all entries to _sentinelRef
   for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
-    _discoveredSoftRefs[i].set_head(_sentinelRef);
+    _discoveredSoftRefs[i].set_head(sentinel_ref());
     _discoveredSoftRefs[i].set_length(0);
   }
+  // If we do barriers, cache a copy of the barrier set.
+  if (discovered_list_needs_barrier) {
+    _bs = Universe::heap()->barrier_set();
+  }
 }
 
 #ifndef PRODUCT
@@ -137,16 +171,19 @@
 
 void ReferenceProcessor::weak_oops_do(OopClosure* f) {
   for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
-    f->do_oop(_discoveredSoftRefs[i].head_ptr());
+    if (UseCompressedOops) {
+      f->do_oop((narrowOop*)_discoveredSoftRefs[i].adr_head());
+    } else {
+      f->do_oop((oop*)_discoveredSoftRefs[i].adr_head());
+    }
   }
 }
 
 void ReferenceProcessor::oops_do(OopClosure* f) {
-  f->do_oop(&_sentinelRef);
+  f->do_oop(adr_sentinel_ref());
 }
 
-void ReferenceProcessor::update_soft_ref_master_clock()
-{
+void ReferenceProcessor::update_soft_ref_master_clock() {
   // Update (advance) the soft ref master clock field. This must be done
   // after processing the soft ref list.
   jlong now = os::javaTimeMillis();
@@ -167,10 +204,7 @@
   // past clock value.
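
A note on the DiscoveredList rework in the first hunk: the list header now carries both a full-width oop and a narrowOop field, and UseCompressedOops selects which one is live; adr_head() hands out the live field's address, which is why weak_oops_do() above casts it to narrowOop* or oop* before invoking the closure. The same pattern, reduced to a standalone sketch; Obj, encode()/decode() and kUseCompressedOops are invented stand-ins, not HotSpot API:

    #include <cassert>
    #include <cstdint>

    struct Obj { };

    static Obj  g_heap[1024];              // stand-in for the managed heap
    static bool kUseCompressedOops = true;

    static uint32_t encode(Obj* p) {       // narrow form: offset from heap base
      assert(p != 0);
      return static_cast<uint32_t>(p - g_heap);
    }
    static Obj* decode(uint32_t n) { return g_heap + n; }

    class List {
    public:
      List() : _oop_head(0), _compressed_head(0) { }
      Obj* head() const {
        return kUseCompressedOops ? decode(_compressed_head) : _oop_head;
      }
      void set_head(Obj* o) {
        if (kUseCompressedOops) {
          _compressed_head = encode(o);    // must compress the head ptr
        } else {
          _oop_head = o;
        }
      }
      // Callers that need the field's address must know which width is live,
      // mirroring adr_head() plus the casts at the call sites above.
      void* adr_head() {
        return kUseCompressedOops ? static_cast<void*>(&_compressed_head)
                                  : static_cast<void*>(&_oop_head);
      }
    private:
      Obj*     _oop_head;
      uint32_t _compressed_head;
    };

    int main() {
      List l;
      l.set_head(&g_heap[7]);
      assert(l.head() == &g_heap[7]);
      return 0;
    }

Carrying both fields costs a word per list but, as the new comment in the class body says, avoids turning DiscoveredList into a template and touching every declaration that uses it.
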
 }
 
-
-void
-ReferenceProcessor::process_discovered_references(
-  ReferencePolicy*             policy,
+void ReferenceProcessor::process_discovered_references(
   BoolObjectClosure*           is_alive,
   OopClosure*                  keep_alive,
   VoidClosure*                 complete_gc,
@@ -185,7 +219,7 @@
   // Soft references
   {
     TraceTime tt("SoftReference", trace_time, false, gclog_or_tty);
-    process_discovered_reflist(_discoveredSoftRefs, policy, true,
+    process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true,
                                is_alive, keep_alive, complete_gc, task_executor);
   }
@@ -226,15 +260,13 @@
   }
 }
 
-
 #ifndef PRODUCT
 // Calculate the number of jni handles.
-unsigned int ReferenceProcessor::count_jni_refs()
-{
+uint ReferenceProcessor::count_jni_refs() {
   class AlwaysAliveClosure: public BoolObjectClosure {
   public:
-    bool do_object_b(oop obj) { return true; }
-    void do_object(oop obj) { assert(false, "Don't call"); }
+    virtual bool do_object_b(oop obj) { return true; }
+    virtual void do_object(oop obj) { assert(false, "Don't call"); }
   };
 
   class CountHandleClosure: public OopClosure {
@@ -242,9 +274,8 @@
     int _count;
   public:
     CountHandleClosure(): _count(0) {}
-    void do_oop(oop* unused) {
-      _count++;
-    }
+    void do_oop(oop* unused)       { _count++; }
+    void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
     int count() { return _count; }
   };
   CountHandleClosure global_handle_count;
@@ -265,36 +296,48 @@
 #endif
   JNIHandles::weak_oops_do(is_alive, keep_alive);
   // Finally remember to keep sentinel around
-  keep_alive->do_oop(&_sentinelRef);
+  keep_alive->do_oop(adr_sentinel_ref());
   complete_gc->do_void();
 }
 
-bool ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) {
-  NOT_PRODUCT(verify_ok_to_handle_reflists());
+
+template <class T>
+static bool enqueue_discovered_ref_helper(ReferenceProcessor* ref,
+                                          AbstractRefProcTaskExecutor* task_executor) {
+
   // Remember old value of pending references list
-  oop* pending_list_addr = java_lang_ref_Reference::pending_list_addr();
-  oop old_pending_list_value = *pending_list_addr;
+  T* pending_list_addr = (T*)java_lang_ref_Reference::pending_list_addr();
+  T old_pending_list_value = *pending_list_addr;
 
   // Enqueue references that are not made active again, and
   // clear the decks for the next collection (cycle).
-  enqueue_discovered_reflists(pending_list_addr, task_executor);
+  ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor);
   // Do the oop-check on pending_list_addr missed in
   // enqueue_discovered_reflist. We should probably
   // do a raw oop_check so that future such idempotent
   // oop_stores relying on the oop-check side-effect
   // may be elided automatically and safely without
   // affecting correctness.
-  oop_store(pending_list_addr, *(pending_list_addr));
+  oop_store(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr));
 
   // Stop treating discovered references specially.
-  disable_discovery();
+  ref->disable_discovery();
 
   // Return true if new pending references were added
   return old_pending_list_value != *pending_list_addr;
 }
 
+bool ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) {
+  NOT_PRODUCT(verify_ok_to_handle_reflists());
+  if (UseCompressedOops) {
+    return enqueue_discovered_ref_helper<narrowOop>(this, task_executor);
+  } else {
+    return enqueue_discovered_ref_helper<oop>(this, task_executor);
+  }
+}
+
 void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
-                                                    oop* pending_list_addr) {
+                                                    HeapWord* pending_list_addr) {
   // Given a list of refs linked through the "discovered" field
   // (java.lang.ref.Reference.discovered) chain them through the
   // "next" field (java.lang.ref.Reference.next) and prepend
@@ -308,19 +351,19 @@
   // the next field and clearing it (except for the last
   // non-sentinel object which is treated specially to avoid
   // confusion with an active reference).
-  while (obj != _sentinelRef) {
+  while (obj != sentinel_ref()) {
     assert(obj->is_instanceRef(), "should be reference object");
     oop next = java_lang_ref_Reference::discovered(obj);
     if (TraceReferenceGC && PrintGCDetails) {
-      gclog_or_tty->print_cr("        obj " INTPTR_FORMAT "/next " INTPTR_FORMAT,
-                             (oopDesc*) obj, (oopDesc*) next);
+      gclog_or_tty->print_cr("        obj " INTPTR_FORMAT "/next " INTPTR_FORMAT,
+                             obj, next);
     }
-    assert(*java_lang_ref_Reference::next_addr(obj) == NULL,
-           "The reference should not be enqueued");
-    if (next == _sentinelRef) {  // obj is last
+    assert(java_lang_ref_Reference::next(obj) == NULL,
+           "The reference should not be enqueued");
+    if (next == sentinel_ref()) {  // obj is last
       // Swap refs_list into pendling_list_addr and
       // set obj's next to what we read from pending_list_addr.
-      oop old = (oop)Atomic::xchg_ptr(refs_list.head(), pending_list_addr);
+      oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
       // Need oop_check on pending_list_addr above;
       // see special oop-check code at the end of
       // enqueue_discovered_reflists() further below.
@@ -344,15 +387,14 @@
 public:
   RefProcEnqueueTask(ReferenceProcessor& ref_processor,
                      DiscoveredList      discovered_refs[],
-                     oop*                pending_list_addr,
-                     oop                 sentinel_ref,
+                     HeapWord*           pending_list_addr,
+                     oop                 sentinel_ref,
                      int                 n_queues)
     : EnqueueTask(ref_processor, discovered_refs,
                   pending_list_addr, sentinel_ref, n_queues)
   { }
-
-  virtual void work(unsigned int work_id)
-  {
+
+  virtual void work(unsigned int work_id) {
     assert(work_id < (unsigned int)_ref_processor.num_q(), "Index out-of-bounds");
     // Simplest first cut: static partitioning.
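
The enqueue path above is now funneled through enqueue_discovered_ref_helper<T>, compiled once per in-heap pointer width and chosen by a single UseCompressedOops test, so every load and store inside the helper is statically typed at the right width. A reduced sketch of the idiom; the flag, the slot variables and the helper body are invented for illustration:

    #include <cstdint>
    #include <iostream>

    static bool     kUseCompressedOops = false;  // runtime flag, as in HotSpot
    static void*    g_pending_wide     = 0;      // full-width pending-list slot
    static uint32_t g_pending_narrow   = 0;      // compressed pending-list slot

    template <class T>
    static bool helper(T* pending_list_addr) {
      T old_pending_list_value = *pending_list_addr;
      // ... splice the discovered lists onto the pending list here,
      // using T-width loads and stores throughout ...
      return old_pending_list_value != *pending_list_addr;
    }

    static bool enqueue_all() {
      if (kUseCompressedOops) {
        return helper<uint32_t>(&g_pending_narrow);
      } else {
        return helper<void*>(&g_pending_wide);
      }
    }

    int main() { std::cout << enqueue_all() << '\n'; return 0; }
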
     int index = work_id;
@@ -366,18 +408,18 @@
 };
 
 // Enqueue references that are not made active again
-void ReferenceProcessor::enqueue_discovered_reflists(oop* pending_list_addr,
+void ReferenceProcessor::enqueue_discovered_reflists(HeapWord* pending_list_addr,
                                                      AbstractRefProcTaskExecutor* task_executor) {
   if (_processing_is_mt && task_executor != NULL) {
     // Parallel code
     RefProcEnqueueTask tsk(*this, _discoveredSoftRefs,
-                           pending_list_addr, _sentinelRef, _num_q);
+                           pending_list_addr, sentinel_ref(), _num_q);
     task_executor->execute(tsk);
   } else {
     // Serial code: call the parent class's implementation
     for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
       enqueue_discovered_reflist(_discoveredSoftRefs[i], pending_list_addr);
-      _discoveredSoftRefs[i].set_head(_sentinelRef);
+      _discoveredSoftRefs[i].set_head(sentinel_ref());
       _discoveredSoftRefs[i].set_length(0);
     }
   }
@@ -391,15 +433,14 @@
                              BoolObjectClosure* is_alive);
 
   // End Of List.
-  inline bool has_next() const
-  { return _next != ReferenceProcessor::_sentinelRef; }
-
+  inline bool has_next() const { return _next != ReferenceProcessor::sentinel_ref(); }
+
   // Get oop to the Reference object.
-  inline oop obj() const { return _ref; }
+  inline oop obj() const { return _ref; }
 
   // Get oop to the referent object.
-  inline oop referent() const { return _referent; }
-
+  inline oop referent() const { return _referent; }
+
   // Returns true if referent is alive.
   inline bool is_referent_alive() const;
 
@@ -407,43 +448,55 @@
   // The "allow_null_referent" argument tells us to allow for the possibility
   // of a NULL referent in the discovered Reference object. This typically
   // happens in the case of concurrent collectors that may have done the
-  // discovery concurrently or interleaved with mutator execution.
+  // discovery concurrently, or interleaved, with mutator execution.
   inline void load_ptrs(DEBUG_ONLY(bool allow_null_referent));
 
   // Move to the next discovered reference.
   inline void next();
-
-  // Remove the current reference from the list and move to the next.
+
+  // Remove the current reference from the list
   inline void remove();
 
   // Make the Reference object active again.
   inline void make_active() { java_lang_ref_Reference::set_next(_ref, NULL); }
 
   // Make the referent alive.
-  inline void make_referent_alive() { _keep_alive->do_oop(_referent_addr); }
-
+  inline void make_referent_alive() {
+    if (UseCompressedOops) {
+      _keep_alive->do_oop((narrowOop*)_referent_addr);
+    } else {
+      _keep_alive->do_oop((oop*)_referent_addr);
+    }
+  }
+
   // Update the discovered field.
-  inline void update_discovered() { _keep_alive->do_oop(_prev_next); }
-
+  inline void update_discovered() {
+    // First _prev_next ref actually points into DiscoveredList (gross).
+    if (UseCompressedOops) {
+      _keep_alive->do_oop((narrowOop*)_prev_next);
+    } else {
+      _keep_alive->do_oop((oop*)_prev_next);
+    }
+  }
+
   // NULL out referent pointer.
-  inline void clear_referent() { *_referent_addr = NULL; }
+  inline void clear_referent() { oop_store_raw(_referent_addr, NULL); }
 
   // Statistics
   NOT_PRODUCT(
   inline size_t processed() const { return _processed; }
   inline size_t removed() const   { return _removed; }
   )
-
-private:
+  inline void move_to_next();
+
+private:
   DiscoveredList&    _refs_list;
-  oop*               _prev_next;
+  HeapWord*          _prev_next;
   oop                _ref;
-  oop*               _discovered_addr;
+  HeapWord*          _discovered_addr;
   oop                _next;
-  oop*               _referent_addr;
+  HeapWord*          _referent_addr;
   oop                _referent;
   OopClosure*        _keep_alive;
   BoolObjectClosure* _is_alive;
@@ -460,7 +513,7 @@
                        OopClosure*        keep_alive,
                        BoolObjectClosure* is_alive)
   : _refs_list(refs_list),
-    _prev_next(refs_list.head_ptr()),
+    _prev_next(refs_list.adr_head()),
     _ref(refs_list.head()),
 #ifdef ASSERT
     _first_seen(refs_list.head()),
@@ -474,19 +527,18 @@
     _is_alive(is_alive)
 { }
 
-inline bool DiscoveredListIterator::is_referent_alive() const
-{
+inline bool DiscoveredListIterator::is_referent_alive() const {
   return _is_alive->do_object_b(_referent);
 }
 
-inline void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent))
-{
+inline void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
   _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
-  assert(_discovered_addr && (*_discovered_addr)->is_oop_or_null(),
+  oop discovered = java_lang_ref_Reference::discovered(_ref);
+  assert(_discovered_addr && discovered->is_oop_or_null(),
          "discovered field is bad");
-  _next = *_discovered_addr;
+  _next = discovered;
   _referent_addr = java_lang_ref_Reference::referent_addr(_ref);
-  _referent = *_referent_addr;
+  _referent = java_lang_ref_Reference::referent(_ref);
   assert(Universe::heap()->is_in_reserved_or_null(_referent),
          "Wrong oop found in java.lang.Reference object");
   assert(allow_null_referent ?
@@ -495,32 +547,32 @@
          "bad referent");
 }
 
-inline void DiscoveredListIterator::next()
-{
+inline void DiscoveredListIterator::next() {
   _prev_next = _discovered_addr;
   move_to_next();
 }
 
-inline void DiscoveredListIterator::remove()
-{
+inline void DiscoveredListIterator::remove() {
   assert(_ref->is_oop(), "Dropping a bad reference");
-  // Clear the discovered_addr field so that the object does
-  // not look like it has been discovered.
-  *_discovered_addr = NULL;
-  // Remove Reference object from list.
-  *_prev_next = _next;
+  oop_store_raw(_discovered_addr, NULL);
+  // First _prev_next ref actually points into DiscoveredList (gross).
+  if (UseCompressedOops) {
+    // Remove Reference object from list.
+    oopDesc::encode_store_heap_oop_not_null((narrowOop*)_prev_next, _next);
+  } else {
+    // Remove Reference object from list.
+    oopDesc::store_heap_oop((oop*)_prev_next, _next);
  }
   NOT_PRODUCT(_removed++);
-  move_to_next();
+  _refs_list.dec_length(1);
 }
 
-inline void DiscoveredListIterator::move_to_next()
-{
+inline void DiscoveredListIterator::move_to_next() {
   _ref = _next;
   assert(_ref != _first_seen, "cyclic ref_list found");
   NOT_PRODUCT(_processed++);
 }
 
-
 // NOTE: process_phase*() are largely similar, and at a high level
 // merely iterate over the extant list applying a predicate to
 // each of its elements and possibly removing that element from the
@@ -534,28 +586,29 @@
 // referents are not alive, but that should be kept alive for policy reasons.
 // Keep alive the transitive closure of all such referents.
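
DiscoveredListIterator::remove() above depends on _prev_next pointing either into the DiscoveredList header (for the first element) or at the previous Reference's discovered field, so unlinking is a single store through it; note that remove() no longer advances the cursor, which is why the call sites in the hunks below pair it with iter.move_to_next(). A standalone sketch of that splice, with plain pointers standing in for oops and a sentinel-terminated list as above:

    #include <cassert>

    struct Node { Node* discovered; };

    static Node  g_sentinel;                     // stands in for _sentinelRef
    static Node* sentinel() { return &g_sentinel; }

    struct Iter {
      Node** prev_next;  // slot that holds the pointer to the current node
      Node*  ref;        // current node
      Node*  next;       // cached ref->discovered

      explicit Iter(Node** head) : prev_next(head), ref(*head), next(0) { }

      bool has_next() const { return ref != sentinel(); }
      void load()           { next = ref->discovered; }
      // iter.next(): remember our link slot, then step.
      void advance()        { prev_next = &ref->discovered; move_to_next(); }
      // iter.remove(): clear the discovered field and splice the node out
      // through prev_next; the caller follows up with move_to_next().
      void remove()         { ref->discovered = 0; *prev_next = next; }
      void move_to_next()   { ref = next; }
    };

    int main() {
      Node c = { sentinel() }, b = { &c }, a = { &b };
      Node* head = &a;                           // list: a -> b -> c -> sentinel
      for (Iter it(&head); it.has_next(); ) {
        it.load();
        if (it.ref == &b) { it.remove(); it.move_to_next(); }  // drop b
        else              { it.advance(); }
      }
      assert(head == &a && a.discovered == &c);  // b spliced out
    }
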
 void
-ReferenceProcessor::process_phase1(DiscoveredList&    refs_list_addr,
+ReferenceProcessor::process_phase1(DiscoveredList&    refs_list,
                                    ReferencePolicy*   policy,
                                    BoolObjectClosure* is_alive,
                                    OopClosure*        keep_alive,
                                    VoidClosure*       complete_gc) {
   assert(policy != NULL, "Must have a non-NULL policy");
-  DiscoveredListIterator iter(refs_list_addr, keep_alive, is_alive);
+  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
   // Decide which softly reachable refs should be kept alive.
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
     bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive();
     if (referent_is_dead && !policy->should_clear_reference(iter.obj())) {
       if (TraceReferenceGC) {
-        gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s"  ") by policy",
-                               (address)iter.obj(), iter.obj()->blueprint()->internal_name());
+        gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s"  ") by policy",
+                               iter.obj(), iter.obj()->blueprint()->internal_name());
       }
+      // Remove Reference object from list
+      iter.remove();
       // Make the Reference object active again
       iter.make_active();
       // keep the referent around
       iter.make_referent_alive();
-      // Remove Reference object from list
-      iter.remove();
+      iter.move_to_next();
     } else {
       iter.next();
     }
@@ -573,28 +626,28 @@
 // Traverse the list and remove any Refs that are not active, or
 // whose referents are either alive or NULL.
 void
-ReferenceProcessor::pp2_work(DiscoveredList& refs_list_addr,
+ReferenceProcessor::pp2_work(DiscoveredList&    refs_list,
                              BoolObjectClosure* is_alive,
-                             OopClosure*        keep_alive)
-{
+                             OopClosure*        keep_alive) {
   assert(discovery_is_atomic(), "Error");
-  DiscoveredListIterator iter(refs_list_addr, keep_alive, is_alive);
+  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
-    DEBUG_ONLY(oop* next_addr = java_lang_ref_Reference::next_addr(iter.obj());)
-    assert(*next_addr == NULL, "Should not discover inactive Reference");
+    DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
+    assert(next == NULL, "Should not discover inactive Reference");
     if (iter.is_referent_alive()) {
       if (TraceReferenceGC) {
-        gclog_or_tty->print_cr("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",
-                               (address)iter.obj(), iter.obj()->blueprint()->internal_name());
+        gclog_or_tty->print_cr("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",
+                               iter.obj(), iter.obj()->blueprint()->internal_name());
      }
       // The referent is reachable after all.
+      // Remove Reference object from list.
+      iter.remove();
       // Update the referent pointer as necessary: Note that this
       // should not entail any recursive marking because the
       // referent must already have been traversed.
       iter.make_referent_alive();
-      // Remove Reference object from list
-      iter.remove();
+      iter.move_to_next();
     } else {
       iter.next();
     }
@@ -608,25 +661,29 @@
 }
 
 void
-ReferenceProcessor::pp2_work_concurrent_discovery(
-  DiscoveredList& refs_list_addr,
-  BoolObjectClosure* is_alive,
-  OopClosure* keep_alive,
-  VoidClosure* complete_gc)
-{
+ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList&    refs_list,
+                                                  BoolObjectClosure* is_alive,
+                                                  OopClosure*        keep_alive,
+                                                  VoidClosure*       complete_gc) {
   assert(!discovery_is_atomic(), "Error");
-  DiscoveredListIterator iter(refs_list_addr, keep_alive, is_alive);
+  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
-    oop* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
+    HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
+    oop next = java_lang_ref_Reference::next(iter.obj());
     if ((iter.referent() == NULL || iter.is_referent_alive() ||
-         *next_addr != NULL)) {
-      assert((*next_addr)->is_oop_or_null(), "bad next field");
+         next != NULL)) {
+      assert(next->is_oop_or_null(), "bad next field");
       // Remove Reference object from list
       iter.remove();
       // Trace the cohorts
       iter.make_referent_alive();
-      keep_alive->do_oop(next_addr);
+      if (UseCompressedOops) {
+        keep_alive->do_oop((narrowOop*)next_addr);
+      } else {
+        keep_alive->do_oop((oop*)next_addr);
      }
+      iter.move_to_next();
     } else {
       iter.next();
     }
@@ -642,15 +699,15 @@
 }
 
 // Traverse the list and process the referents, by either
-// either clearing them or keeping them (and their reachable
+// clearing them or keeping them (and their reachable
 // closure) alive.
 void
-ReferenceProcessor::process_phase3(DiscoveredList& refs_list_addr,
+ReferenceProcessor::process_phase3(DiscoveredList&    refs_list,
                                    bool               clear_referent,
                                    BoolObjectClosure* is_alive,
                                    OopClosure*        keep_alive,
                                    VoidClosure*       complete_gc) {
-  DiscoveredListIterator iter(refs_list_addr, keep_alive, is_alive);
+  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
   while (iter.has_next()) {
     iter.update_discovered();
     iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
@@ -662,9 +719,9 @@
       iter.make_referent_alive();
     }
     if (TraceReferenceGC) {
-      gclog_or_tty->print_cr("Adding %sreference (" INTPTR_FORMAT ": %s) as pending",
-                             clear_referent ? "cleared " : "",
-                             (address)iter.obj(), iter.obj()->blueprint()->internal_name());
+      gclog_or_tty->print_cr("Adding %sreference (" INTPTR_FORMAT ": %s) as pending",
+                             clear_referent ? "cleared " : "",
+                             iter.obj(), iter.obj()->blueprint()->internal_name());
     }
     assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference");
     // If discovery is concurrent, we may have objects with null referents,
@@ -682,21 +739,26 @@
 }
 
 void
-ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& ref_list) {
-  oop obj = ref_list.head();
-  while (obj != _sentinelRef) {
-    oop* discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
-    obj = *discovered_addr;
-    *discovered_addr = NULL;
+ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& refs_list) {
+  oop obj = refs_list.head();
+  while (obj != sentinel_ref()) {
+    oop discovered = java_lang_ref_Reference::discovered(obj);
+    java_lang_ref_Reference::set_discovered_raw(obj, NULL);
+    obj = discovered;
   }
-  ref_list.set_head(_sentinelRef);
-  ref_list.set_length(0);
+  refs_list.set_head(sentinel_ref());
+  refs_list.set_length(0);
 }
 
-void
-ReferenceProcessor::abandon_partial_discovered_list_arr(DiscoveredList refs_lists[]) {
-  for (int i = 0; i < _num_q; i++) {
-    abandon_partial_discovered_list(refs_lists[i]);
+void ReferenceProcessor::abandon_partial_discovery() {
+  // loop over the lists
+  for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
+    if (TraceReferenceGC && PrintGCDetails && ((i % _num_q) == 0)) {
+      gclog_or_tty->print_cr(
+        "\nAbandoning %s discovered list",
+        list_name(i));
+    }
+    abandon_partial_discovered_list(_discoveredSoftRefs[i]);
   }
 }
 
@@ -780,13 +842,13 @@
       // find an element to split the list on
       for (size_t j = 0; j < refs_to_move; ++j) {
         move_tail = new_head;
-        new_head = *java_lang_ref_Reference::discovered_addr(new_head);
+        new_head = java_lang_ref_Reference::discovered(new_head);
       }
       java_lang_ref_Reference::set_discovered(move_tail, ref_lists[to_idx].head());
       ref_lists[to_idx].set_head(move_head);
-      ref_lists[to_idx].set_length(ref_lists[to_idx].length() + refs_to_move);
+      ref_lists[to_idx].inc_length(refs_to_move);
       ref_lists[from_idx].set_head(new_head);
-      ref_lists[from_idx].set_length(ref_lists[from_idx].length() - refs_to_move);
+      ref_lists[from_idx].dec_length(refs_to_move);
     } else {
       ++to_idx;
     }
@@ -875,30 +937,28 @@
 void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) {
   assert(!discovery_is_atomic(), "Else why call this method?");
   DiscoveredListIterator iter(refs_list, NULL, NULL);
-  size_t length = refs_list.length();
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
-    oop* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
-    assert((*next_addr)->is_oop_or_null(), "bad next field");
+    oop next = java_lang_ref_Reference::next(iter.obj());
+    assert(next->is_oop_or_null(), "bad next field");
     // If referent has been cleared or Reference is not active,
     // drop it.
-    if (iter.referent() == NULL || *next_addr != NULL) {
+    if (iter.referent() == NULL || next != NULL) {
       debug_only(
-        if (PrintGCDetails && TraceReferenceGC) {
-          gclog_or_tty->print_cr("clean_up_discovered_list: Dropping Reference: "
-            INTPTR_FORMAT " with next field: " INTPTR_FORMAT
-            " and referent: " INTPTR_FORMAT,
-            (address)iter.obj(), (address)*next_addr, (address)iter.referent());
-        }
+        if (PrintGCDetails && TraceReferenceGC) {
+          gclog_or_tty->print_cr("clean_up_discovered_list: Dropping Reference: "
+            INTPTR_FORMAT " with next field: " INTPTR_FORMAT
+            " and referent: " INTPTR_FORMAT,
+            iter.obj(), next, iter.referent());
+        }
       )
       // Remove Reference object from list
       iter.remove();
-      --length;
+      iter.move_to_next();
     } else {
       iter.next();
     }
   }
-  refs_list.set_length(length);
   NOT_PRODUCT(
     if (PrintGCDetails && TraceReferenceGC) {
       gclog_or_tty->print(
@@ -953,18 +1013,34 @@
   return list;
 }
 
-inline void ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& list,
-                                                          oop obj, oop* discovered_addr) {
+inline void
+ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
+                                              oop             obj,
+                                              HeapWord*       discovered_addr) {
   assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
   // First we must make sure this object is only enqueued once. CAS in a non null
   // discovered_addr.
-  oop retest = (oop)Atomic::cmpxchg_ptr(list.head(), discovered_addr, NULL);
+  oop current_head = refs_list.head();
+
+  // Note: In the case of G1, this pre-barrier is strictly
+  // not necessary because the only case we are interested in
+  // here is when *discovered_addr is NULL, so this will expand to
+  // nothing. As a result, I am just manually eliding this out for G1.
+  if (_discovered_list_needs_barrier && !UseG1GC) {
+    _bs->write_ref_field_pre((void*)discovered_addr, current_head); guarantee(false, "Needs to be fixed: YSR");
+  }
+  oop retest = oopDesc::atomic_compare_exchange_oop(current_head, discovered_addr,
+                                                    NULL);
   if (retest == NULL) {
     // This thread just won the right to enqueue the object.
     // We have separate lists for enqueueing so no synchronization
     // is necessary.
-    list.set_head(obj);
-    list.set_length(list.length() + 1);
+    refs_list.set_head(obj);
+    refs_list.inc_length(1);
+    if (_discovered_list_needs_barrier) {
+      _bs->write_ref_field((void*)discovered_addr, current_head); guarantee(false, "Needs to be fixed: YSR");
+    }
+
   } else {
     // If retest was non NULL, another thread beat us to it:
     // The reference has already been discovered...
@@ -975,7 +1051,6 @@
   }
 }
 
-
 // We mention two of several possible choices here:
 // #0: if the reference object is not in the "originating generation"
 //     (or part of the heap being collected, indicated by our "span"
@@ -1009,8 +1084,8 @@
     return false;
   }
   // We only enqueue active references.
-  oop* next_addr = java_lang_ref_Reference::next_addr(obj);
-  if (*next_addr != NULL) {
+  oop next = java_lang_ref_Reference::next(obj);
+  if (next != NULL) {
     return false;
   }
 
@@ -1027,24 +1102,37 @@
   // reachable.
   if (is_alive_non_header() != NULL) {
     oop referent = java_lang_ref_Reference::referent(obj);
-    // We'd like to assert the following:
-    // assert(referent != NULL, "Refs with null referents already filtered");
-    // However, since this code may be executed concurrently with
-    // mutators, which can clear() the referent, it is not
-    // guaranteed that the referent is non-NULL.
+    // In the case of non-concurrent discovery, the last
+    // disjunct below should hold. It may not hold in the
+    // case of concurrent discovery because mutators may
+    // concurrently clear() a Reference.
+    assert(UseConcMarkSweepGC || UseG1GC || referent != NULL,
+           "Refs with null referents already filtered");
     if (is_alive_non_header()->do_object_b(referent)) {
       return false;  // referent is reachable
     }
   }
+  if (rt == REF_SOFT) {
+    // For soft refs we can decide now if these are not
+    // current candidates for clearing, in which case we
+    // can mark through them now, rather than delaying that
+    // to the reference-processing phase. Since all current
+    // time-stamp policies advance the soft-ref clock only
+    // at a major collection cycle, this is always currently
+    // accurate.
+    if (!_current_soft_ref_policy->should_clear_reference(obj)) {
+      return false;
    }
+  }
 
-  oop* discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
-  assert(discovered_addr != NULL && (*discovered_addr)->is_oop_or_null(),
-         "bad discovered field");
-  if (*discovered_addr != NULL) {
+  HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
+  const oop  discovered = java_lang_ref_Reference::discovered(obj);
+  assert(discovered->is_oop_or_null(), "bad discovered field");
+  if (discovered != NULL) {
     // The reference has already been discovered...
     if (TraceReferenceGC) {
-      gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)",
-                             (oopDesc*)obj, obj->blueprint()->internal_name());
+      gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
     }
     if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
       // assumes that an object is not processed twice;
@@ -1058,7 +1146,7 @@
       // discovered twice except by concurrent collectors that potentially
       // trace the same Reference object twice.
       assert(UseConcMarkSweepGC,
-             "Only possible with a concurrent collector");
+             "Only possible with an incremental-update concurrent collector");
       return true;
     }
   }
@@ -1086,14 +1174,26 @@
     return false;  // nothing special needs to be done
   }
 
-  // We do a raw store here, the field will be visited later when
-  // processing the discovered references.
   if (_discovery_is_mt) {
     add_to_discovered_list_mt(*list, obj, discovered_addr);
   } else {
-    *discovered_addr = list->head();
+    // If "_discovered_list_needs_barrier", we do write barriers when
+    // updating the discovered reference list.  Otherwise, we do a raw store
+    // here: the field will be visited later when processing the discovered
+    // references.
+    oop current_head = list->head();
+    // As in the case further above, since we are over-writing a NULL
+    // pre-value, we can safely elide the pre-barrier here for the case of G1.
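
add_to_discovered_list_mt() above claims a Reference by CAS-ing its discovered field from NULL to the current list head: exactly one GC worker wins the race, so each Reference ends up on exactly one thread-local list and the lists themselves need no locking. A standalone sketch of the claim step, with std::atomic standing in for the Atomic/oopDesc CAS primitives and Ref invented for illustration:

    #include <atomic>
    #include <cassert>

    struct Ref { std::atomic<Ref*> discovered; Ref() : discovered(0) { } };

    struct DList {
      Ref* head;          // thread-private list: plain stores suffice
      int  len;
      DList() : head(0), len(0) { }
    };

    static Ref g_sentinel;  // non-NULL terminator, like _sentinelRef above

    // Returns true if this thread won the right to discover 'obj'.
    static bool try_discover(DList& list, Ref& obj) {
      Ref* current_head = list.head ? list.head : &g_sentinel;
      Ref* expected = 0;
      // Equivalent of atomic_compare_exchange_oop(current_head, addr, NULL):
      if (obj.discovered.compare_exchange_strong(expected, current_head)) {
        list.head = &obj;
        list.len++;
        return true;
      }
      return false;       // another thread discovered it first
    }

    int main() {
      DList l;
      Ref r;
      assert(try_discover(l, r));   // first claim wins
      assert(!try_discover(l, r));  // second claim sees non-NULL and fails
    }
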
+    assert(discovered == NULL, "control point invariant");
+    if (_discovered_list_needs_barrier && !UseG1GC) { // safe to elide for G1
+      _bs->write_ref_field_pre((oop*)discovered_addr, current_head);
+    }
+    oop_store_raw(discovered_addr, current_head);
+    if (_discovered_list_needs_barrier) {
+      _bs->write_ref_field((oop*)discovered_addr, current_head);
+    }
     list->set_head(obj);
-    list->set_length(list->length() + 1);
+    list->inc_length(1);
   }
 
   // In the MT discovery case, it is currently possible to see
@@ -1108,8 +1208,8 @@
   if (TraceReferenceGC) {
     oop referent = java_lang_ref_Reference::referent(obj);
     if (PrintGCDetails) {
-      gclog_or_tty->print_cr("Enqueued reference (" INTPTR_FORMAT ": %s)",
-                             (oopDesc*) obj, obj->blueprint()->internal_name());
+      gclog_or_tty->print_cr("Enqueued reference (" INTPTR_FORMAT ": %s)",
+                             obj, obj->blueprint()->internal_name());
     }
     assert(referent->is_oop(), "Enqueued a bad referent");
   }
@@ -1134,45 +1234,48 @@
     TraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
               false, gclog_or_tty);
     for (int i = 0; i < _num_q; i++) {
+      if (yield->should_return()) {
+        return;
+      }
       preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive,
                                   keep_alive, complete_gc, yield);
     }
   }
-  if (yield->should_return()) {
-    return;
-  }
 
   // Weak references
   {
     TraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
               false, gclog_or_tty);
     for (int i = 0; i < _num_q; i++) {
+      if (yield->should_return()) {
+        return;
+      }
      preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive,
                                   keep_alive, complete_gc, yield);
     }
   }
-  if (yield->should_return()) {
-    return;
-  }
 
   // Final references
   {
     TraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
              false, gclog_or_tty);
     for (int i = 0; i < _num_q; i++) {
+      if (yield->should_return()) {
+        return;
+      }
       preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive,
                                   keep_alive, complete_gc, yield);
     }
   }
-  if (yield->should_return()) {
-    return;
-  }
 
   // Phantom references
   {
     TraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
              false, gclog_or_tty);
     for (int i = 0; i < _num_q; i++) {
+      if (yield->should_return()) {
+        return;
+      }
       preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive,
                                   keep_alive, complete_gc, yield);
     }
@@ -1181,20 +1284,25 @@
 
 // Walk the given discovered ref list, and remove all reference objects
 // whose referents are still alive, whose referents are NULL or which
-// are not active (have a non-NULL next field). NOTE: For this to work
-// correctly, refs discovery can not be happening concurrently with this
-// step.
-void ReferenceProcessor::preclean_discovered_reflist(
-  DiscoveredList& refs_list, BoolObjectClosure* is_alive,
-  OopClosure* keep_alive, VoidClosure* complete_gc, YieldClosure* yield) {
-
+// are not active (have a non-NULL next field). NOTE: When we are
+// thus precleaning the ref lists (which happens single-threaded today),
+// we do not disable refs discovery to honour the correct semantics of
+// java.lang.Reference. As a result, we need to be careful below
+// that ref removal steps interleave safely with ref discovery steps
+// (in this thread).
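
When _discovered_list_needs_barrier is set, the single-threaded path above brackets the discovered-field update with write_ref_field_pre()/write_ref_field(), and the pre-barrier is elided exactly when the overwritten value is known to be NULL, the G1 case the comments call out. A sketch of that store pattern, using a stub BarrierSet invented for illustration, not HotSpot's class:

    #include <cstdio>

    struct Obj;

    struct BarrierSet {
      // Snapshot-at-the-beginning collectors want the value being overwritten...
      void write_ref_field_pre(Obj** field) {
        std::printf("pre-barrier, old value %p\n", (void*)*field);
      }
      // ...card-marking collectors want to know the field was updated.
      void write_ref_field(Obj** field) {
        std::printf("post-barrier, field %p\n", (void*)field);
      }
    };

    static void barriered_store(BarrierSet* bs, bool needs_barrier,
                                Obj** field, Obj* value) {
      // A NULL pre-value (as at discovery time) records nothing useful in a
      // SATB log, so the pre-barrier can be skipped in that case.
      if (needs_barrier && *field != 0) {
        bs->write_ref_field_pre(field);
      }
      *field = value;                  // the raw store itself
      if (needs_barrier) {
        bs->write_ref_field(field);
      }
    }

    int main() {
      BarrierSet bs;
      int dummy;
      Obj* slot = 0;
      barriered_store(&bs, true, &slot, reinterpret_cast<Obj*>(&dummy));
      barriered_store(&bs, true, &slot, 0);  // non-NULL pre-value: pre-barrier fires
    }
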
+void
+ReferenceProcessor::preclean_discovered_reflist(DiscoveredList&    refs_list,
+                                                BoolObjectClosure* is_alive,
+                                                OopClosure*        keep_alive,
+                                                VoidClosure*       complete_gc,
+                                                YieldClosure*      yield) {
   DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
-  size_t length = refs_list.length();
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
-    oop* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
-    if (iter.referent() == NULL || iter.is_referent_alive() ||
-        *next_addr != NULL) {
+    oop obj = iter.obj();
+    oop next = java_lang_ref_Reference::next(obj);
+    if (iter.referent() == NULL || iter.is_referent_alive() ||
+        next != NULL) {
       // The referent has been cleared, or is alive, or the Reference is not
       // active; we need to trace and mark its cohort.
       if (TraceReferenceGC) {
@@ -1203,16 +1311,20 @@
       }
       // Remove Reference object from list
       iter.remove();
-      --length;
       // Keep alive its cohort.
       iter.make_referent_alive();
-      keep_alive->do_oop(next_addr);
+      if (UseCompressedOops) {
+        narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj);
+        keep_alive->do_oop(next_addr);
+      } else {
+        oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj);
+        keep_alive->do_oop(next_addr);
      }
+      iter.move_to_next();
     } else {
       iter.next();
     }
   }
-  refs_list.set_length(length);
-
   // Close the reachable set
   complete_gc->do_void();
 
@@ -1244,7 +1356,7 @@
 #endif
 
 void ReferenceProcessor::verify() {
-  guarantee(_sentinelRef != NULL && _sentinelRef->is_oop(), "Lost _sentinelRef");
+  guarantee(sentinel_ref() != NULL && sentinel_ref()->is_oop(), "Lost _sentinelRef");
 }
 
 #ifndef PRODUCT
@@ -1252,12 +1364,12 @@
   guarantee(!_discovering_refs, "Discovering refs?");
   for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
     oop obj = _discoveredSoftRefs[i].head();
-    while (obj != _sentinelRef) {
+    while (obj != sentinel_ref()) {
       oop next = java_lang_ref_Reference::discovered(obj);
       java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
       obj = next;
     }
-    _discoveredSoftRefs[i].set_head(_sentinelRef);
+    _discoveredSoftRefs[i].set_head(sentinel_ref());
     _discoveredSoftRefs[i].set_length(0);
   }
 }
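
The teardown loop in the final hunk walks the discovered chain until it reaches the sentinel, clearing each link on the way, and then points the list head back at the sentinel, this code's representation of an empty list. A standalone sketch, with plain pointers standing in for oops:

    #include <cassert>

    struct Ref { Ref* discovered; };

    static Ref g_sentinel;               // stands in for _sentinelRef

    static void clear_list(Ref*& head) {
      Ref* obj = head;
      while (obj != &g_sentinel) {
        Ref* next = obj->discovered;     // save the link before clearing it
        obj->discovered = 0;
        obj = next;
      }
      head = &g_sentinel;                // empty list: head points at sentinel
    }

    int main() {
      Ref c = { &g_sentinel }, b = { &c }, a = { &b };
      Ref* head = &a;
      clear_list(head);
      assert(head == &g_sentinel && a.discovered == 0);
    }
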